Compare commits

4 Commits: feature/is ... feature/is

| Author | SHA1 | Date |
|---|---|---|
|  | ce952d97b2 |  |
|  | 53d3637b3e |  |
|  | ae7195c829 |  |
|  | ad1f17bb56 |  |
pyproject.toml

@@ -8,6 +8,7 @@ dependencies = [
     "pydantic>=2.5,<3",
     "pydantic-settings>=2.1,<3",
     "google-genai>=1.0,<2",
+    "scipy>=1.11,<2",
 ]

 [project.optional-dependencies]
src/core/criticality.py  (new file, 110 lines)
@@ -0,0 +1,110 @@
"""Criticality assessment for urgency-based response system.

Evaluates market conditions to determine response urgency and enable
faster reactions in critical situations.
"""

from __future__ import annotations

from enum import StrEnum


class CriticalityLevel(StrEnum):
    """Urgency levels for market conditions and trading decisions."""

    CRITICAL = "CRITICAL"  # <5s timeout - Emergency response required
    HIGH = "HIGH"  # <30s timeout - Elevated priority
    NORMAL = "NORMAL"  # <60s timeout - Standard processing
    LOW = "LOW"  # No timeout - Batch processing


class CriticalityAssessor:
    """Assesses market conditions to determine response criticality level."""

    def __init__(
        self,
        critical_pnl_threshold: float = -2.5,
        critical_price_change_threshold: float = 5.0,
        critical_volume_surge_threshold: float = 10.0,
        high_volatility_threshold: float = 70.0,
        low_volatility_threshold: float = 30.0,
    ) -> None:
        """Initialize the criticality assessor.

        Args:
            critical_pnl_threshold: P&L % that triggers CRITICAL (default -2.5%)
            critical_price_change_threshold: Price change % that triggers CRITICAL
                (default 5.0% in 1 minute)
            critical_volume_surge_threshold: Volume surge ratio that triggers CRITICAL
                (default 10x average)
            high_volatility_threshold: Volatility score that triggers HIGH
                (default 70.0)
            low_volatility_threshold: Volatility score below which is LOW
                (default 30.0)
        """
        self.critical_pnl_threshold = critical_pnl_threshold
        self.critical_price_change_threshold = critical_price_change_threshold
        self.critical_volume_surge_threshold = critical_volume_surge_threshold
        self.high_volatility_threshold = high_volatility_threshold
        self.low_volatility_threshold = low_volatility_threshold

    def assess_market_conditions(
        self,
        pnl_pct: float,
        volatility_score: float,
        volume_surge: float,
        price_change_1m: float = 0.0,
        is_market_open: bool = True,
    ) -> CriticalityLevel:
        """Assess criticality level based on market conditions.

        Args:
            pnl_pct: Current P&L percentage
            volatility_score: Momentum score from VolatilityAnalyzer (0-100)
            volume_surge: Volume surge ratio (current / average)
            price_change_1m: 1-minute price change percentage
            is_market_open: Whether the market is currently open

        Returns:
            CriticalityLevel indicating required response urgency
        """
        # Market closed or very quiet → LOW priority (batch processing)
        if not is_market_open or volatility_score < self.low_volatility_threshold:
            return CriticalityLevel.LOW

        # CRITICAL conditions: immediate action required
        # 1. P&L near circuit breaker (-2.5% is close to -3.0% breaker)
        if pnl_pct <= self.critical_pnl_threshold:
            return CriticalityLevel.CRITICAL

        # 2. Large sudden price movement (>5% in 1 minute)
        if abs(price_change_1m) >= self.critical_price_change_threshold:
            return CriticalityLevel.CRITICAL

        # 3. Extreme volume surge (>10x average) indicates major event
        if volume_surge >= self.critical_volume_surge_threshold:
            return CriticalityLevel.CRITICAL

        # HIGH priority: elevated volatility requires faster response
        if volatility_score >= self.high_volatility_threshold:
            return CriticalityLevel.HIGH

        # NORMAL: standard trading conditions
        return CriticalityLevel.NORMAL

    def get_timeout(self, level: CriticalityLevel) -> float | None:
        """Get timeout in seconds for a given criticality level.

        Args:
            level: Criticality level

        Returns:
            Timeout in seconds, or None for no timeout (LOW priority)
        """
        timeout_map = {
            CriticalityLevel.CRITICAL: 5.0,
            CriticalityLevel.HIGH: 30.0,
            CriticalityLevel.NORMAL: 60.0,
            CriticalityLevel.LOW: None,
        }
        return timeout_map[level]
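For orientation, a minimal usage sketch of the assessor above; the thresholds are the defaults from `__init__`, and the market-condition numbers are invented for illustration:

```python
from src.core.criticality import CriticalityAssessor, CriticalityLevel

assessor = CriticalityAssessor()

# A position down 2.8% trips the CRITICAL P&L threshold (-2.5%).
level = assessor.assess_market_conditions(
    pnl_pct=-2.8,
    volatility_score=55.0,
    volume_surge=1.2,
)
assert level is CriticalityLevel.CRITICAL
assert assessor.get_timeout(level) == 5.0

# A quiet market (volatility below 30) falls through to LOW, with no timeout.
level = assessor.assess_market_conditions(
    pnl_pct=0.4,
    volatility_score=20.0,
    volume_surge=1.0,
)
assert level is CriticalityLevel.LOW
assert assessor.get_timeout(level) is None
```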
src/core/priority_queue.py  (new file, 291 lines)
@@ -0,0 +1,291 @@
"""Priority-based task queue for latency control.

Implements a thread-safe priority queue with timeout enforcement and metrics tracking.
"""

from __future__ import annotations

import asyncio
import heapq
import logging
import time
from collections.abc import Callable, Coroutine
from dataclasses import dataclass, field
from typing import Any

from src.core.criticality import CriticalityLevel

logger = logging.getLogger(__name__)


@dataclass(order=True)
class PriorityTask:
    """Task with priority and timestamp for queue ordering."""

    # Lower priority value = higher urgency (CRITICAL=0, HIGH=1, NORMAL=2, LOW=3)
    priority: int
    timestamp: float
    # Task data not used in comparison
    task_id: str = field(compare=False)
    task_data: dict[str, Any] = field(compare=False, default_factory=dict)
    callback: Callable[[], Coroutine[Any, Any, Any]] | None = field(
        compare=False, default=None
    )


@dataclass
class QueueMetrics:
    """Metrics for priority queue performance monitoring."""

    total_enqueued: int = 0
    total_dequeued: int = 0
    total_timeouts: int = 0
    total_errors: int = 0
    current_size: int = 0
    # Average wait time per criticality level (in seconds)
    avg_wait_time: dict[CriticalityLevel, float] = field(default_factory=dict)
    # P95 wait time per criticality level
    p95_wait_time: dict[CriticalityLevel, float] = field(default_factory=dict)


class PriorityTaskQueue:
    """Thread-safe priority queue with timeout enforcement."""

    # Priority mapping for criticality levels
    PRIORITY_MAP = {
        CriticalityLevel.CRITICAL: 0,
        CriticalityLevel.HIGH: 1,
        CriticalityLevel.NORMAL: 2,
        CriticalityLevel.LOW: 3,
    }

    def __init__(self, max_size: int = 1000) -> None:
        """Initialize the priority task queue.

        Args:
            max_size: Maximum queue size (default 1000)
        """
        self._queue: list[PriorityTask] = []
        self._lock = asyncio.Lock()
        self._max_size = max_size
        self._metrics = QueueMetrics()
        # Track wait times for metrics
        self._wait_times: dict[CriticalityLevel, list[float]] = {
            level: [] for level in CriticalityLevel
        }

    async def enqueue(
        self,
        task_id: str,
        criticality: CriticalityLevel,
        task_data: dict[str, Any],
        callback: Callable[[], Coroutine[Any, Any, Any]] | None = None,
    ) -> bool:
        """Add a task to the priority queue.

        Args:
            task_id: Unique identifier for the task
            criticality: Criticality level determining priority
            task_data: Data associated with the task
            callback: Optional async callback to execute

        Returns:
            True if enqueued successfully, False if queue is full
        """
        async with self._lock:
            if len(self._queue) >= self._max_size:
                logger.warning(
                    "Priority queue full (size=%d), rejecting task %s",
                    len(self._queue),
                    task_id,
                )
                return False

            priority = self.PRIORITY_MAP[criticality]
            timestamp = time.time()

            task = PriorityTask(
                priority=priority,
                timestamp=timestamp,
                task_id=task_id,
                task_data=task_data,
                callback=callback,
            )

            heapq.heappush(self._queue, task)
            self._metrics.total_enqueued += 1
            self._metrics.current_size = len(self._queue)

            logger.debug(
                "Enqueued task %s with criticality %s (priority=%d, queue_size=%d)",
                task_id,
                criticality.value,
                priority,
                len(self._queue),
            )

            return True

    async def dequeue(self, timeout: float | None = None) -> PriorityTask | None:
        """Remove and return the highest priority task from the queue.

        Args:
            timeout: Maximum time to wait for a task (seconds)

        Returns:
            PriorityTask if available, None if queue is empty or timeout
        """
        start_time = time.time()
        deadline = start_time + timeout if timeout else None

        while True:
            async with self._lock:
                if self._queue:
                    task = heapq.heappop(self._queue)
                    self._metrics.total_dequeued += 1
                    self._metrics.current_size = len(self._queue)

                    # Calculate wait time
                    wait_time = time.time() - task.timestamp
                    criticality = self._get_criticality_from_priority(task.priority)
                    self._wait_times[criticality].append(wait_time)
                    self._update_wait_time_metrics()

                    logger.debug(
                        "Dequeued task %s (priority=%d, wait_time=%.2fs, queue_size=%d)",
                        task.task_id,
                        task.priority,
                        wait_time,
                        len(self._queue),
                    )

                    return task

            # Queue is empty
            if deadline and time.time() >= deadline:
                return None

            # Wait a bit before checking again
            await asyncio.sleep(0.1)

    async def execute_with_timeout(
        self,
        task: PriorityTask,
        timeout: float | None,
    ) -> Any:
        """Execute a task with timeout enforcement.

        Args:
            task: Task to execute
            timeout: Timeout in seconds (None = no timeout)

        Returns:
            Result from task callback

        Raises:
            asyncio.TimeoutError: If task exceeds timeout
            Exception: Any exception raised by the task callback
        """
        if not task.callback:
            logger.warning("Task %s has no callback, skipping execution", task.task_id)
            return None

        criticality = self._get_criticality_from_priority(task.priority)

        try:
            if timeout:
                result = await asyncio.wait_for(task.callback(), timeout=timeout)
            else:
                result = await task.callback()

            logger.debug(
                "Task %s completed successfully (criticality=%s)",
                task.task_id,
                criticality.value,
            )
            return result

        except TimeoutError:
            self._metrics.total_timeouts += 1
            logger.error(
                "Task %s timed out after %.2fs (criticality=%s)",
                task.task_id,
                timeout or 0.0,
                criticality.value,
            )
            raise

        except Exception as exc:
            self._metrics.total_errors += 1
            logger.exception(
                "Task %s failed with error (criticality=%s): %s",
                task.task_id,
                criticality.value,
                exc,
            )
            raise

    def _get_criticality_from_priority(self, priority: int) -> CriticalityLevel:
        """Convert priority back to criticality level."""
        for level, prio in self.PRIORITY_MAP.items():
            if prio == priority:
                return level
        return CriticalityLevel.NORMAL

    def _update_wait_time_metrics(self) -> None:
        """Update average and p95 wait time metrics."""
        for level, times in self._wait_times.items():
            if not times:
                continue

            # Keep only last 1000 measurements to avoid memory bloat
            if len(times) > 1000:
                self._wait_times[level] = times[-1000:]
                times = self._wait_times[level]

            # Calculate average
            self._metrics.avg_wait_time[level] = sum(times) / len(times)

            # Calculate P95
            sorted_times = sorted(times)
            p95_idx = int(len(sorted_times) * 0.95)
            self._metrics.p95_wait_time[level] = sorted_times[p95_idx]

    async def get_metrics(self) -> QueueMetrics:
        """Get current queue metrics.

        Returns:
            QueueMetrics with current statistics
        """
        async with self._lock:
            return QueueMetrics(
                total_enqueued=self._metrics.total_enqueued,
                total_dequeued=self._metrics.total_dequeued,
                total_timeouts=self._metrics.total_timeouts,
                total_errors=self._metrics.total_errors,
                current_size=self._metrics.current_size,
                avg_wait_time=dict(self._metrics.avg_wait_time),
                p95_wait_time=dict(self._metrics.p95_wait_time),
            )

    async def size(self) -> int:
        """Get current queue size.

        Returns:
            Number of tasks in queue
        """
        async with self._lock:
            return len(self._queue)

    async def clear(self) -> int:
        """Clear all tasks from the queue.

        Returns:
            Number of tasks cleared
        """
        async with self._lock:
            count = len(self._queue)
            self._queue.clear()
            self._metrics.current_size = 0
            logger.info("Cleared %d tasks from priority queue", count)
            return count
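A minimal async sketch of how the queue above orders and executes work; the task names and payloads are invented, and only the two modules added in this diff are assumed:

```python
import asyncio

from src.core.criticality import CriticalityLevel
from src.core.priority_queue import PriorityTaskQueue


async def main() -> None:
    queue = PriorityTaskQueue(max_size=10)

    async def sell_everything() -> str:
        return "flattened"

    # A NORMAL task enqueued first still yields to a CRITICAL one.
    await queue.enqueue("rebalance", CriticalityLevel.NORMAL, {"stock": "005930"})
    await queue.enqueue(
        "emergency-exit", CriticalityLevel.CRITICAL, {}, callback=sell_everything
    )

    task = await queue.dequeue(timeout=1.0)
    assert task is not None and task.task_id == "emergency-exit"

    # Enforce the 5s CRITICAL budget while running the callback.
    result = await queue.execute_with_timeout(task, timeout=5.0)
    print(result, await queue.size())  # flattened 1


asyncio.run(main())
```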
src/evolution/__init__.py  (new file, 19 lines)

@@ -0,0 +1,19 @@
"""Evolution engine for self-improving trading strategies."""

from src.evolution.ab_test import ABTester, ABTestResult, StrategyPerformance
from src.evolution.optimizer import EvolutionOptimizer
from src.evolution.performance_tracker import (
    PerformanceDashboard,
    PerformanceTracker,
    StrategyMetrics,
)

__all__ = [
    "EvolutionOptimizer",
    "ABTester",
    "ABTestResult",
    "StrategyPerformance",
    "PerformanceTracker",
    "PerformanceDashboard",
    "StrategyMetrics",
]
src/evolution/ab_test.py  (new file, 220 lines)
@@ -0,0 +1,220 @@
"""A/B Testing framework for strategy comparison.

Runs multiple strategies in parallel, tracks their performance,
and uses statistical significance testing to determine winners.
"""

from __future__ import annotations

import logging
from dataclasses import dataclass
from typing import Any

import scipy.stats as stats

logger = logging.getLogger(__name__)


@dataclass
class StrategyPerformance:
    """Performance metrics for a single strategy."""

    strategy_name: str
    total_trades: int
    wins: int
    losses: int
    total_pnl: float
    avg_pnl: float
    win_rate: float
    sharpe_ratio: float | None = None


@dataclass
class ABTestResult:
    """Result of an A/B test between two strategies."""

    strategy_a: str
    strategy_b: str
    winner: str | None
    p_value: float
    confidence_level: float
    is_significant: bool
    performance_a: StrategyPerformance
    performance_b: StrategyPerformance


class ABTester:
    """A/B testing framework for comparing trading strategies."""

    def __init__(self, significance_level: float = 0.05) -> None:
        """Initialize A/B tester.

        Args:
            significance_level: P-value threshold for statistical significance (default 0.05)
        """
        self._significance_level = significance_level

    def calculate_performance(
        self, trades: list[dict[str, Any]], strategy_name: str
    ) -> StrategyPerformance:
        """Calculate performance metrics for a strategy.

        Args:
            trades: List of trade records with pnl values
            strategy_name: Name of the strategy

        Returns:
            StrategyPerformance object with calculated metrics
        """
        if not trades:
            return StrategyPerformance(
                strategy_name=strategy_name,
                total_trades=0,
                wins=0,
                losses=0,
                total_pnl=0.0,
                avg_pnl=0.0,
                win_rate=0.0,
                sharpe_ratio=None,
            )

        total_trades = len(trades)
        wins = sum(1 for t in trades if t.get("pnl", 0) > 0)
        losses = sum(1 for t in trades if t.get("pnl", 0) < 0)
        pnls = [t.get("pnl", 0.0) for t in trades]
        total_pnl = sum(pnls)
        avg_pnl = total_pnl / total_trades if total_trades > 0 else 0.0
        win_rate = (wins / total_trades * 100) if total_trades > 0 else 0.0

        # Calculate Sharpe ratio (risk-adjusted return)
        sharpe_ratio = None
        if len(pnls) > 1:
            mean_return = avg_pnl
            std_return = (
                sum((p - mean_return) ** 2 for p in pnls) / (len(pnls) - 1)
            ) ** 0.5
            if std_return > 0:
                sharpe_ratio = mean_return / std_return

        return StrategyPerformance(
            strategy_name=strategy_name,
            total_trades=total_trades,
            wins=wins,
            losses=losses,
            total_pnl=round(total_pnl, 2),
            avg_pnl=round(avg_pnl, 2),
            win_rate=round(win_rate, 2),
            sharpe_ratio=round(sharpe_ratio, 4) if sharpe_ratio else None,
        )

    def compare_strategies(
        self,
        trades_a: list[dict[str, Any]],
        trades_b: list[dict[str, Any]],
        strategy_a_name: str = "Strategy A",
        strategy_b_name: str = "Strategy B",
    ) -> ABTestResult:
        """Compare two strategies using statistical testing.

        Uses a two-sample t-test to determine if the performance difference is significant.

        Args:
            trades_a: List of trades from strategy A
            trades_b: List of trades from strategy B
            strategy_a_name: Name of strategy A
            strategy_b_name: Name of strategy B

        Returns:
            ABTestResult with comparison details
        """
        perf_a = self.calculate_performance(trades_a, strategy_a_name)
        perf_b = self.calculate_performance(trades_b, strategy_b_name)

        # Extract PnL arrays for statistical testing
        pnls_a = [t.get("pnl", 0.0) for t in trades_a]
        pnls_b = [t.get("pnl", 0.0) for t in trades_b]

        # Perform two-sample t-test
        if len(pnls_a) > 1 and len(pnls_b) > 1:
            t_stat, p_value = stats.ttest_ind(pnls_a, pnls_b, equal_var=False)
            is_significant = p_value < self._significance_level
            confidence_level = (1 - p_value) * 100
        else:
            # Not enough data for statistical test
            p_value = 1.0
            is_significant = False
            confidence_level = 0.0

        # Determine winner based on average PnL
        winner = None
        if is_significant:
            if perf_a.avg_pnl > perf_b.avg_pnl:
                winner = strategy_a_name
            elif perf_b.avg_pnl > perf_a.avg_pnl:
                winner = strategy_b_name

        return ABTestResult(
            strategy_a=strategy_a_name,
            strategy_b=strategy_b_name,
            winner=winner,
            p_value=round(p_value, 4),
            confidence_level=round(confidence_level, 2),
            is_significant=is_significant,
            performance_a=perf_a,
            performance_b=perf_b,
        )

    def should_deploy(
        self,
        result: ABTestResult,
        min_win_rate: float = 60.0,
        min_trades: int = 20,
    ) -> bool:
        """Determine if a winning strategy should be deployed.

        Args:
            result: A/B test result
            min_win_rate: Minimum win rate percentage for deployment (default 60%)
            min_trades: Minimum number of trades required (default 20)

        Returns:
            True if the winning strategy meets deployment criteria
        """
        if not result.is_significant or result.winner is None:
            return False

        # Get performance of winning strategy
        if result.winner == result.strategy_a:
            winning_perf = result.performance_a
        else:
            winning_perf = result.performance_b

        # Check deployment criteria
        has_enough_trades = winning_perf.total_trades >= min_trades
        has_good_win_rate = winning_perf.win_rate >= min_win_rate
        is_profitable = winning_perf.avg_pnl > 0

        meets_criteria = has_enough_trades and has_good_win_rate and is_profitable

        if meets_criteria:
            logger.info(
                "Strategy '%s' meets deployment criteria: "
                "win_rate=%.2f%%, trades=%d, avg_pnl=%.2f",
                result.winner,
                winning_perf.win_rate,
                winning_perf.total_trades,
                winning_perf.avg_pnl,
            )
        else:
            logger.info(
                "Strategy '%s' does NOT meet deployment criteria: "
                "win_rate=%.2f%% (min %.2f%%), trades=%d (min %d), avg_pnl=%.2f",
                result.winner if result.winner else "unknown",
                winning_perf.win_rate if result.winner else 0.0,
                min_win_rate,
                winning_perf.total_trades if result.winner else 0,
                min_trades,
                winning_perf.avg_pnl if result.winner else 0.0,
            )

        return meets_criteria
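A small end-to-end sketch of the tester above, using fabricated PnL lists; `momentum_v1` and `momentum_v2` are placeholder strategy names:

```python
from src.evolution.ab_test import ABTester

tester = ABTester(significance_level=0.05)

# Strategy A wins consistently; strategy B loses slightly on average.
trades_a = [{"pnl": p} for p in (120.0, 90.0, 150.0, 80.0, 110.0, 95.0) * 4]
trades_b = [{"pnl": p} for p in (-30.0, 10.0, -50.0, 5.0, -20.0, -10.0) * 4]

result = tester.compare_strategies(trades_a, trades_b, "momentum_v2", "momentum_v1")
print(result.winner, result.p_value, result.is_significant)

# Deployment gate: significance alone is not enough; the winner also needs
# >= 20 trades, >= 60% win rate, and positive average PnL.
if tester.should_deploy(result, min_win_rate=60.0, min_trades=20):
    print(f"deploy {result.winner}")
```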
src/evolution/optimizer.py

@@ -1,10 +1,10 @@
 """Evolution Engine — analyzes trade logs and generates new strategies.

 This module:
-1. Reads trade_logs.db to identify failing patterns
-2. Asks Gemini to generate a new strategy class
-3. Runs pytest on the generated file
-4. Creates a simulated PR if tests pass
+1. Uses DecisionLogger.get_losing_decisions() to identify failing patterns
+2. Analyzes failure patterns by time, market conditions, stock characteristics
+3. Asks Gemini to generate improved strategy recommendations
+4. Generates new strategy classes with enhanced decision-making logic
 """

 from __future__ import annotations

@@ -14,6 +14,7 @@ import logging
 import sqlite3
 import subprocess
 import textwrap
+from collections import Counter
 from datetime import UTC, datetime
 from pathlib import Path
 from typing import Any

@@ -21,6 +22,8 @@ from typing import Any
 from google import genai

 from src.config import Settings
+from src.db import init_db
+from src.logging.decision_logger import DecisionLog, DecisionLogger

 logger = logging.getLogger(__name__)

@@ -53,29 +56,105 @@ class EvolutionOptimizer:
         self._db_path = settings.DB_PATH
         self._client = genai.Client(api_key=settings.GEMINI_API_KEY)
         self._model_name = settings.GEMINI_MODEL
+        self._conn = init_db(self._db_path)
+        self._decision_logger = DecisionLogger(self._conn)

     # ------------------------------------------------------------------
     # Analysis
     # ------------------------------------------------------------------

     def analyze_failures(self, limit: int = 50) -> list[dict[str, Any]]:
-        """Find trades where high confidence led to losses."""
-        conn = sqlite3.connect(self._db_path)
-        conn.row_factory = sqlite3.Row
-        try:
-            rows = conn.execute(
-                """
-                SELECT stock_code, action, confidence, pnl, rationale, timestamp
-                FROM trades
-                WHERE confidence >= 80 AND pnl < 0
-                ORDER BY pnl ASC
-                LIMIT ?
-                """,
-                (limit,),
-            ).fetchall()
-            return [dict(r) for r in rows]
-        finally:
-            conn.close()
+        """Find high-confidence decisions that resulted in losses.
+
+        Uses DecisionLogger.get_losing_decisions() to retrieve failures.
+        """
+        losing_decisions = self._decision_logger.get_losing_decisions(
+            min_confidence=80, min_loss=-100.0
+        )
+
+        # Limit results
+        if len(losing_decisions) > limit:
+            losing_decisions = losing_decisions[:limit]
+
+        # Convert to dict format for analysis
+        failures = []
+        for decision in losing_decisions:
+            failures.append({
+                "decision_id": decision.decision_id,
+                "timestamp": decision.timestamp,
+                "stock_code": decision.stock_code,
+                "market": decision.market,
+                "exchange_code": decision.exchange_code,
+                "action": decision.action,
+                "confidence": decision.confidence,
+                "rationale": decision.rationale,
+                "outcome_pnl": decision.outcome_pnl,
+                "outcome_accuracy": decision.outcome_accuracy,
+                "context_snapshot": decision.context_snapshot,
+                "input_data": decision.input_data,
+            })
+
+        return failures
+
+    def identify_failure_patterns(
+        self, failures: list[dict[str, Any]]
+    ) -> dict[str, Any]:
+        """Identify patterns in losing decisions.
+
+        Analyzes:
+        - Time patterns (hour of day, day of week)
+        - Market conditions (volatility, volume)
+        - Stock characteristics (price range, market)
+        - Common failure modes in rationale
+        """
+        if not failures:
+            return {"pattern_count": 0, "patterns": {}}
+
+        patterns = {
+            "markets": Counter(),
+            "actions": Counter(),
+            "hours": Counter(),
+            "avg_confidence": 0.0,
+            "avg_loss": 0.0,
+            "total_failures": len(failures),
+        }
+
+        total_confidence = 0
+        total_loss = 0.0
+
+        for failure in failures:
+            # Market distribution
+            patterns["markets"][failure.get("market", "UNKNOWN")] += 1
+
+            # Action distribution
+            patterns["actions"][failure.get("action", "UNKNOWN")] += 1
+
+            # Time pattern (extract hour from ISO timestamp)
+            timestamp = failure.get("timestamp", "")
+            if timestamp:
+                try:
+                    dt = datetime.fromisoformat(timestamp)
+                    patterns["hours"][dt.hour] += 1
+                except (ValueError, AttributeError):
+                    pass
+
+            # Aggregate metrics
+            total_confidence += failure.get("confidence", 0)
+            total_loss += failure.get("outcome_pnl", 0.0)
+
+        patterns["avg_confidence"] = (
+            round(total_confidence / len(failures), 2) if failures else 0.0
+        )
+        patterns["avg_loss"] = (
+            round(total_loss / len(failures), 2) if failures else 0.0
+        )
+
+        # Convert Counters to regular dicts for JSON serialization
+        patterns["markets"] = dict(patterns["markets"])
+        patterns["actions"] = dict(patterns["actions"])
+        patterns["hours"] = dict(patterns["hours"])
+
+        return patterns

     def get_performance_summary(self) -> dict[str, Any]:
         """Return aggregate performance metrics from trade logs."""

@@ -109,14 +188,25 @@ class EvolutionOptimizer:
     async def generate_strategy(self, failures: list[dict[str, Any]]) -> Path | None:
         """Ask Gemini to generate a new strategy based on failure analysis.

+        Integrates failure patterns and market conditions to create improved strategies.
         Returns the path to the generated strategy file, or None on failure.
         """
+        # Identify failure patterns first
+        patterns = self.identify_failure_patterns(failures)
+
         prompt = (
             "You are a quantitative trading strategy developer.\n"
-            "Analyze these failed trades and generate an improved strategy.\n\n"
-            f"Failed trades:\n{json.dumps(failures, indent=2, default=str)}\n\n"
-            "Generate a Python class that inherits from BaseStrategy.\n"
-            "The class must have an `evaluate(self, market_data: dict) -> dict` method.\n"
+            "Analyze these failed trades and their patterns, then generate an improved strategy.\n\n"
+            f"Failure Patterns:\n{json.dumps(patterns, indent=2)}\n\n"
+            f"Sample Failed Trades (first 5):\n"
+            f"{json.dumps(failures[:5], indent=2, default=str)}\n\n"
+            "Based on these patterns, generate an improved trading strategy.\n"
+            "The strategy should:\n"
+            "1. Avoid the identified failure patterns\n"
+            "2. Consider market-specific conditions\n"
+            "3. Adjust confidence based on historical performance\n\n"
+            "Generate a Python method body that inherits from BaseStrategy.\n"
+            "The method signature is: evaluate(self, market_data: dict) -> dict\n"
             "The method must return a dict with keys: action, confidence, rationale.\n"
             "Respond with ONLY the method body (Python code), no class definition.\n"
         )

@@ -147,10 +237,15 @@ class EvolutionOptimizer:
         # Indent the body for the class method
         indented_body = textwrap.indent(body, "        ")

+        # Generate rationale from patterns
+        rationale = f"Auto-evolved from {len(failures)} failures. "
+        rationale += f"Primary failure markets: {list(patterns.get('markets', {}).keys())}. "
+        rationale += f"Average loss: {patterns.get('avg_loss', 0.0)}"
+
         content = STRATEGY_TEMPLATE.format(
             name=version,
             timestamp=datetime.now(UTC).isoformat(),
-            rationale="Auto-evolved from failure analysis",
+            rationale=rationale,
             class_name=class_name,
             body=indented_body.strip(),
         )
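For orientation, a sketch of the dictionary `identify_failure_patterns` produces for two hypothetical failures; `optimizer` is assumed to be an existing `EvolutionOptimizer` instance and all values are invented:

```python
# `optimizer` is an EvolutionOptimizer (see the diff above).
failures = [
    {"market": "KR", "action": "BUY", "timestamp": "2024-01-15T09:30:00+00:00",
     "confidence": 85, "outcome_pnl": -1000.0},
    {"market": "KR", "action": "SELL", "timestamp": "2024-01-15T14:30:00+00:00",
     "confidence": 90, "outcome_pnl": -2000.0},
]

patterns = optimizer.identify_failure_patterns(failures)
# {
#     "markets": {"KR": 2},
#     "actions": {"BUY": 1, "SELL": 1},
#     "hours": {9: 1, 14: 1},
#     "avg_confidence": 87.5,
#     "avg_loss": -1500.0,
#     "total_failures": 2,
# }
```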
src/evolution/performance_tracker.py  (new file, 303 lines)
@@ -0,0 +1,303 @@
"""Performance tracking system for strategy monitoring.

Tracks win rates, monitors improvement over time,
and provides performance metrics dashboard.
"""

from __future__ import annotations

import json
import logging
import sqlite3
from dataclasses import asdict, dataclass
from datetime import UTC, datetime, timedelta
from typing import Any

logger = logging.getLogger(__name__)


@dataclass
class StrategyMetrics:
    """Performance metrics for a strategy over a time period."""

    strategy_name: str
    period_start: str
    period_end: str
    total_trades: int
    wins: int
    losses: int
    holds: int
    win_rate: float
    avg_pnl: float
    total_pnl: float
    best_trade: float
    worst_trade: float
    avg_confidence: float


@dataclass
class PerformanceDashboard:
    """Comprehensive performance dashboard."""

    generated_at: str
    overall_metrics: StrategyMetrics
    daily_metrics: list[StrategyMetrics]
    weekly_metrics: list[StrategyMetrics]
    improvement_trend: dict[str, Any]


class PerformanceTracker:
    """Tracks and monitors strategy performance over time."""

    def __init__(self, db_path: str) -> None:
        """Initialize performance tracker.

        Args:
            db_path: Path to the trade logs database
        """
        self._db_path = db_path

    def get_strategy_metrics(
        self,
        strategy_name: str | None = None,
        start_date: str | None = None,
        end_date: str | None = None,
    ) -> StrategyMetrics:
        """Get performance metrics for a strategy over a time period.

        Args:
            strategy_name: Name of the strategy (None = all strategies)
            start_date: Start date in ISO format (None = beginning of time)
            end_date: End date in ISO format (None = now)

        Returns:
            StrategyMetrics object with performance data
        """
        conn = sqlite3.connect(self._db_path)
        conn.row_factory = sqlite3.Row

        try:
            # Build query with optional filters
            query = """
                SELECT
                    COUNT(*) as total_trades,
                    SUM(CASE WHEN pnl > 0 THEN 1 ELSE 0 END) as wins,
                    SUM(CASE WHEN pnl < 0 THEN 1 ELSE 0 END) as losses,
                    SUM(CASE WHEN action = 'HOLD' THEN 1 ELSE 0 END) as holds,
                    COALESCE(AVG(CASE WHEN pnl IS NOT NULL THEN pnl END), 0) as avg_pnl,
                    COALESCE(SUM(CASE WHEN pnl IS NOT NULL THEN pnl ELSE 0 END), 0) as total_pnl,
                    COALESCE(MAX(pnl), 0) as best_trade,
                    COALESCE(MIN(pnl), 0) as worst_trade,
                    COALESCE(AVG(confidence), 0) as avg_confidence,
                    MIN(timestamp) as period_start,
                    MAX(timestamp) as period_end
                FROM trades
                WHERE 1=1
            """
            params: list[Any] = []

            if start_date:
                query += " AND timestamp >= ?"
                params.append(start_date)

            if end_date:
                query += " AND timestamp <= ?"
                params.append(end_date)

            # Note: Currently trades table doesn't have strategy_name column
            # This is a placeholder for future extension

            row = conn.execute(query, params).fetchone()

            total_trades = row["total_trades"] or 0
            wins = row["wins"] or 0
            win_rate = (wins / total_trades * 100) if total_trades > 0 else 0.0

            return StrategyMetrics(
                strategy_name=strategy_name or "default",
                period_start=row["period_start"] or "",
                period_end=row["period_end"] or "",
                total_trades=total_trades,
                wins=wins,
                losses=row["losses"] or 0,
                holds=row["holds"] or 0,
                win_rate=round(win_rate, 2),
                avg_pnl=round(row["avg_pnl"], 2),
                total_pnl=round(row["total_pnl"], 2),
                best_trade=round(row["best_trade"], 2),
                worst_trade=round(row["worst_trade"], 2),
                avg_confidence=round(row["avg_confidence"], 2),
            )
        finally:
            conn.close()

    def get_daily_metrics(
        self, days: int = 7, strategy_name: str | None = None
    ) -> list[StrategyMetrics]:
        """Get daily performance metrics for the last N days.

        Args:
            days: Number of days to retrieve (default 7)
            strategy_name: Name of the strategy (None = all strategies)

        Returns:
            List of StrategyMetrics, one per day
        """
        metrics = []
        end_date = datetime.now(UTC)

        for i in range(days):
            day_end = end_date - timedelta(days=i)
            day_start = day_end - timedelta(days=1)

            day_metrics = self.get_strategy_metrics(
                strategy_name=strategy_name,
                start_date=day_start.isoformat(),
                end_date=day_end.isoformat(),
            )
            metrics.append(day_metrics)

        return metrics

    def get_weekly_metrics(
        self, weeks: int = 4, strategy_name: str | None = None
    ) -> list[StrategyMetrics]:
        """Get weekly performance metrics for the last N weeks.

        Args:
            weeks: Number of weeks to retrieve (default 4)
            strategy_name: Name of the strategy (None = all strategies)

        Returns:
            List of StrategyMetrics, one per week
        """
        metrics = []
        end_date = datetime.now(UTC)

        for i in range(weeks):
            week_end = end_date - timedelta(weeks=i)
            week_start = week_end - timedelta(weeks=1)

            week_metrics = self.get_strategy_metrics(
                strategy_name=strategy_name,
                start_date=week_start.isoformat(),
                end_date=week_end.isoformat(),
            )
            metrics.append(week_metrics)

        return metrics

    def calculate_improvement_trend(
        self, metrics_history: list[StrategyMetrics]
    ) -> dict[str, Any]:
        """Calculate improvement trend from historical metrics.

        Args:
            metrics_history: List of StrategyMetrics ordered from oldest to newest

        Returns:
            Dictionary with trend analysis
        """
        if len(metrics_history) < 2:
            return {
                "trend": "insufficient_data",
                "win_rate_change": 0.0,
                "pnl_change": 0.0,
                "confidence_change": 0.0,
            }

        oldest = metrics_history[0]
        newest = metrics_history[-1]

        win_rate_change = newest.win_rate - oldest.win_rate
        pnl_change = newest.avg_pnl - oldest.avg_pnl
        confidence_change = newest.avg_confidence - oldest.avg_confidence

        # Determine overall trend
        if win_rate_change > 5.0 and pnl_change > 0:
            trend = "improving"
        elif win_rate_change < -5.0 or pnl_change < 0:
            trend = "declining"
        else:
            trend = "stable"

        return {
            "trend": trend,
            "win_rate_change": round(win_rate_change, 2),
            "pnl_change": round(pnl_change, 2),
            "confidence_change": round(confidence_change, 2),
            "period_count": len(metrics_history),
        }

    def generate_dashboard(
        self, strategy_name: str | None = None
    ) -> PerformanceDashboard:
        """Generate a comprehensive performance dashboard.

        Args:
            strategy_name: Name of the strategy (None = all strategies)

        Returns:
            PerformanceDashboard with all metrics
        """
        # Get overall metrics
        overall_metrics = self.get_strategy_metrics(strategy_name=strategy_name)

        # Get daily metrics (last 7 days)
        daily_metrics = self.get_daily_metrics(days=7, strategy_name=strategy_name)

        # Get weekly metrics (last 4 weeks)
        weekly_metrics = self.get_weekly_metrics(weeks=4, strategy_name=strategy_name)

        # Calculate improvement trend
        improvement_trend = self.calculate_improvement_trend(weekly_metrics[::-1])

        return PerformanceDashboard(
            generated_at=datetime.now(UTC).isoformat(),
            overall_metrics=overall_metrics,
            daily_metrics=daily_metrics,
            weekly_metrics=weekly_metrics,
            improvement_trend=improvement_trend,
        )

    def export_dashboard_json(
        self, dashboard: PerformanceDashboard
    ) -> str:
        """Export dashboard as JSON string.

        Args:
            dashboard: PerformanceDashboard object

        Returns:
            JSON string representation
        """
        data = {
            "generated_at": dashboard.generated_at,
            "overall_metrics": asdict(dashboard.overall_metrics),
            "daily_metrics": [asdict(m) for m in dashboard.daily_metrics],
            "weekly_metrics": [asdict(m) for m in dashboard.weekly_metrics],
            "improvement_trend": dashboard.improvement_trend,
        }
        return json.dumps(data, indent=2)

    def log_dashboard(self, dashboard: PerformanceDashboard) -> None:
        """Log dashboard summary to logger.

        Args:
            dashboard: PerformanceDashboard object
        """
        logger.info("=" * 60)
        logger.info("PERFORMANCE DASHBOARD")
        logger.info("=" * 60)
        logger.info("Generated: %s", dashboard.generated_at)
        logger.info("")
        logger.info("Overall Performance:")
        logger.info("  Total Trades: %d", dashboard.overall_metrics.total_trades)
        logger.info("  Win Rate: %.2f%%", dashboard.overall_metrics.win_rate)
        logger.info("  Average P&L: %.2f", dashboard.overall_metrics.avg_pnl)
        logger.info("  Total P&L: %.2f", dashboard.overall_metrics.total_pnl)
        logger.info("")
        logger.info("Improvement Trend (%s):", dashboard.improvement_trend["trend"])
        logger.info("  Win Rate Change: %+.2f%%", dashboard.improvement_trend["win_rate_change"])
        logger.info("  P&L Change: %+.2f", dashboard.improvement_trend["pnl_change"])
        logger.info("=" * 60)
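A usage sketch for the tracker above; `trade_logs.db` is a hypothetical path to a SQLite database containing the `trades` table this module queries:

```python
from src.evolution.performance_tracker import PerformanceTracker

tracker = PerformanceTracker(db_path="trade_logs.db")  # hypothetical path

dashboard = tracker.generate_dashboard()
tracker.log_dashboard(dashboard)

# Persist a snapshot for later comparison.
with open("dashboard.json", "w") as fh:
    fh.write(tracker.export_dashboard_json(dashboard))

# Trend over the 4 weekly buckets, oldest to newest:
# "improving" | "stable" | "declining" | "insufficient_data"
print(dashboard.improvement_trend["trend"])
```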
src/main.py  (88 lines)
@@ -19,7 +19,10 @@ from src.brain.gemini_client import GeminiClient
 from src.broker.kis_api import KISBroker
 from src.broker.overseas import OverseasBroker
 from src.config import Settings
+from src.context.layer import ContextLayer
 from src.context.store import ContextStore
+from src.core.criticality import CriticalityAssessor, CriticalityLevel
+from src.core.priority_queue import PriorityTaskQueue
 from src.core.risk_manager import CircuitBreakerTripped, RiskManager
 from src.db import init_db, log_trade
 from src.logging.decision_logger import DecisionLogger

@@ -57,10 +60,14 @@ async def trading_cycle(
     risk: RiskManager,
     db_conn: Any,
     decision_logger: DecisionLogger,
+    context_store: ContextStore,
+    criticality_assessor: CriticalityAssessor,
     market: MarketInfo,
     stock_code: str,
 ) -> None:
     """Execute one trading cycle for a single stock."""
+    cycle_start_time = asyncio.get_event_loop().time()
+
     # 1. Fetch market data
     if market.is_domestic:
         orderbook = await broker.get_orderbook(stock_code)

@@ -106,6 +113,42 @@ async def trading_cycle(
         "foreigner_net": foreigner_net,
     }

+    # 1.5. Get volatility metrics from context store (L7_REALTIME)
+    latest_timeframe = context_store.get_latest_timeframe(ContextLayer.L7_REALTIME)
+    volatility_score = 50.0  # Default normal volatility
+    volume_surge = 1.0
+    price_change_1m = 0.0
+
+    if latest_timeframe:
+        volatility_data = context_store.get_context(
+            ContextLayer.L7_REALTIME,
+            latest_timeframe,
+            f"volatility_{stock_code}",
+        )
+        if volatility_data:
+            volatility_score = volatility_data.get("momentum_score", 50.0)
+            volume_surge = volatility_data.get("volume_surge", 1.0)
+            price_change_1m = volatility_data.get("price_change_1m", 0.0)
+
+    # 1.6. Assess criticality based on market conditions
+    criticality = criticality_assessor.assess_market_conditions(
+        pnl_pct=pnl_pct,
+        volatility_score=volatility_score,
+        volume_surge=volume_surge,
+        price_change_1m=price_change_1m,
+        is_market_open=True,
+    )
+
+    logger.info(
+        "Criticality for %s (%s): %s (pnl=%.2f%%, volatility=%.1f, volume_surge=%.1fx)",
+        stock_code,
+        market.name,
+        criticality.value,
+        pnl_pct,
+        volatility_score,
+        volume_surge,
+    )
+
     # 2. Ask the brain for a decision
     decision = await brain.decide(market_data)
     logger.info(

@@ -191,6 +234,27 @@ async def trading_cycle(
         exchange_code=market.exchange_code,
     )

+    # 7. Latency monitoring
+    cycle_end_time = asyncio.get_event_loop().time()
+    cycle_latency = cycle_end_time - cycle_start_time
+    timeout = criticality_assessor.get_timeout(criticality)
+
+    if timeout and cycle_latency > timeout:
+        logger.warning(
+            "Trading cycle exceeded timeout for %s (criticality=%s, latency=%.2fs, timeout=%.2fs)",
+            stock_code,
+            criticality.value,
+            cycle_latency,
+            timeout,
+        )
+    else:
+        logger.debug(
+            "Trading cycle completed within timeout for %s (criticality=%s, latency=%.2fs)",
+            stock_code,
+            criticality.value,
+            cycle_latency,
+        )
+

 async def run(settings: Settings) -> None:
     """Main async loop — iterate over open markets on a timer."""

@@ -212,6 +276,16 @@ async def run(settings: Settings) -> None:
         top_n=5,
     )

+    # Initialize latency control system
+    criticality_assessor = CriticalityAssessor(
+        critical_pnl_threshold=-2.5,  # Near circuit breaker at -3.0%
+        critical_price_change_threshold=5.0,  # 5% in 1 minute
+        critical_volume_surge_threshold=10.0,  # 10x average
+        high_volatility_threshold=70.0,
+        low_volatility_threshold=30.0,
+    )
+    priority_queue = PriorityTaskQueue(max_size=1000)
+
     # Track last scan time for each market
     last_scan_time: dict[str, float] = {}

@@ -315,6 +389,8 @@ async def run(settings: Settings) -> None:
                     risk,
                     db_conn,
                     decision_logger,
+                    context_store,
+                    criticality_assessor,
                     market,
                     stock_code,
                 )

@@ -343,6 +419,18 @@ async def run(settings: Settings) -> None:
                 logger.exception("Unexpected error for %s: %s", stock_code, exc)
                 break  # Don't retry on unexpected errors

+        # Log priority queue metrics periodically
+        metrics = await priority_queue.get_metrics()
+        if metrics.total_enqueued > 0:
+            logger.info(
+                "Priority queue metrics: enqueued=%d, dequeued=%d, size=%d, timeouts=%d, errors=%d",
+                metrics.total_enqueued,
+                metrics.total_dequeued,
+                metrics.current_size,
+                metrics.total_timeouts,
+                metrics.total_errors,
+            )
+
         # Wait for next cycle or shutdown
         try:
             await asyncio.wait_for(shutdown.wait(), timeout=TRADE_INTERVAL_SECONDS)
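The hunks above create `priority_queue` but, as shown, only report its metrics; the sketch below is one way the queue and assessor could be combined, offered as an assumption rather than part of this PR (`handle_emergency` is hypothetical):

```python
# Hypothetical wiring inside run(), after criticality_assessor and
# priority_queue are created. Not part of this diff.

async def handle_emergency() -> str:
    """Hypothetical callback: flatten the position that tripped CRITICAL."""
    return "flattened"

level = criticality_assessor.assess_market_conditions(
    pnl_pct=-2.8, volatility_score=80.0, volume_surge=12.0
)
await priority_queue.enqueue(
    task_id="emergency_005930",
    criticality=level,
    task_data={"stock_code": "005930"},
    callback=handle_emergency,
)

task = await priority_queue.dequeue(timeout=1.0)
if task is not None:
    # Enforce the criticality-derived budget (5s for CRITICAL).
    await priority_queue.execute_with_timeout(
        task, criticality_assessor.get_timeout(level)
    )
```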
tests/test_evolution.py  (new file, 686 lines)
@@ -0,0 +1,686 @@
|
|||||||
|
"""Tests for the Evolution Engine components.
|
||||||
|
|
||||||
|
Tests cover:
|
||||||
|
- EvolutionOptimizer: failure analysis and strategy generation
|
||||||
|
- ABTester: A/B testing and statistical comparison
|
||||||
|
- PerformanceTracker: metrics tracking and dashboard
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import sqlite3
|
||||||
|
import tempfile
|
||||||
|
from datetime import UTC, datetime, timedelta
|
||||||
|
from pathlib import Path
|
||||||
|
from unittest.mock import AsyncMock, MagicMock, Mock, patch
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from src.config import Settings
|
||||||
|
from src.db import init_db, log_trade
|
||||||
|
from src.evolution.ab_test import ABTester, ABTestResult, StrategyPerformance
|
||||||
|
from src.evolution.optimizer import EvolutionOptimizer
|
||||||
|
from src.evolution.performance_tracker import (
|
||||||
|
PerformanceDashboard,
|
||||||
|
PerformanceTracker,
|
||||||
|
StrategyMetrics,
|
||||||
|
)
|
||||||
|
from src.logging.decision_logger import DecisionLogger
|
||||||
|
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Fixtures
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def db_conn() -> sqlite3.Connection:
|
||||||
|
"""Provide an in-memory database with initialized schema."""
|
||||||
|
return init_db(":memory:")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def settings() -> Settings:
|
||||||
|
"""Provide test settings."""
|
||||||
|
return Settings(
|
||||||
|
KIS_APP_KEY="test_key",
|
||||||
|
KIS_APP_SECRET="test_secret",
|
||||||
|
KIS_ACCOUNT_NO="12345678-01",
|
||||||
|
GEMINI_API_KEY="test_gemini_key",
|
||||||
|
GEMINI_MODEL="gemini-pro",
|
||||||
|
DB_PATH=":memory:",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def optimizer(settings: Settings) -> EvolutionOptimizer:
|
||||||
|
"""Provide an EvolutionOptimizer instance."""
|
||||||
|
return EvolutionOptimizer(settings)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def decision_logger(db_conn: sqlite3.Connection) -> DecisionLogger:
|
||||||
|
"""Provide a DecisionLogger instance."""
|
||||||
|
return DecisionLogger(db_conn)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def ab_tester() -> ABTester:
|
||||||
|
"""Provide an ABTester instance."""
|
||||||
|
return ABTester(significance_level=0.05)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def performance_tracker(settings: Settings) -> PerformanceTracker:
|
||||||
|
"""Provide a PerformanceTracker instance."""
|
||||||
|
return PerformanceTracker(db_path=":memory:")
|
||||||
|
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# EvolutionOptimizer Tests
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_analyze_failures_uses_decision_logger(optimizer: EvolutionOptimizer) -> None:
    """Test that analyze_failures uses DecisionLogger.get_losing_decisions()."""
    # Add some losing decisions to the database
    logger = optimizer._decision_logger

    # High-confidence loss
    id1 = logger.log_decision(
        stock_code="005930",
        market="KR",
        exchange_code="KRX",
        action="BUY",
        confidence=85,
        rationale="Expected growth",
        context_snapshot={"L1": {"price": 70000}},
        input_data={"price": 70000, "volume": 1000},
    )
    logger.update_outcome(id1, pnl=-2000.0, accuracy=0)

    # Another high-confidence loss
    id2 = logger.log_decision(
        stock_code="000660",
        market="KR",
        exchange_code="KRX",
        action="SELL",
        confidence=90,
        rationale="Expected drop",
        context_snapshot={"L1": {"price": 100000}},
        input_data={"price": 100000, "volume": 500},
    )
    logger.update_outcome(id2, pnl=-1500.0, accuracy=0)

    # Low-confidence loss (should be ignored)
    id3 = logger.log_decision(
        stock_code="035420",
        market="KR",
        exchange_code="KRX",
        action="HOLD",
        confidence=70,
        rationale="Uncertain",
        context_snapshot={},
        input_data={},
    )
    logger.update_outcome(id3, pnl=-500.0, accuracy=0)

    # Analyze failures
    failures = optimizer.analyze_failures(limit=10)

    # Should get 2 failures (confidence >= 80)
    assert len(failures) == 2
    assert all(f["confidence"] >= 80 for f in failures)
    assert all(f["outcome_pnl"] <= -100.0 for f in failures)


def test_analyze_failures_empty_database(optimizer: EvolutionOptimizer) -> None:
    """Test analyze_failures with no losing decisions."""
    failures = optimizer.analyze_failures()
    assert failures == []


def test_identify_failure_patterns(optimizer: EvolutionOptimizer) -> None:
    """Test identification of failure patterns."""
    failures = [
        {
            "decision_id": "1",
            "timestamp": "2024-01-15T09:30:00+00:00",
            "stock_code": "005930",
            "market": "KR",
            "exchange_code": "KRX",
            "action": "BUY",
            "confidence": 85,
            "rationale": "Test",
            "outcome_pnl": -1000.0,
            "outcome_accuracy": 0,
            "context_snapshot": {},
            "input_data": {},
        },
        {
            "decision_id": "2",
            "timestamp": "2024-01-15T14:30:00+00:00",
            "stock_code": "000660",
            "market": "KR",
            "exchange_code": "KRX",
            "action": "SELL",
            "confidence": 90,
            "rationale": "Test",
            "outcome_pnl": -2000.0,
            "outcome_accuracy": 0,
            "context_snapshot": {},
            "input_data": {},
        },
        {
            "decision_id": "3",
            "timestamp": "2024-01-15T09:45:00+00:00",
            "stock_code": "035420",
            "market": "US_NASDAQ",
            "exchange_code": "NASDAQ",
            "action": "BUY",
            "confidence": 80,
            "rationale": "Test",
            "outcome_pnl": -500.0,
            "outcome_accuracy": 0,
            "context_snapshot": {},
            "input_data": {},
        },
    ]

    patterns = optimizer.identify_failure_patterns(failures)

    assert patterns["total_failures"] == 3
    assert patterns["markets"]["KR"] == 2
    assert patterns["markets"]["US_NASDAQ"] == 1
    assert patterns["actions"]["BUY"] == 2
    assert patterns["actions"]["SELL"] == 1
    assert 9 in patterns["hours"]  # 09:30 and 09:45
    assert 14 in patterns["hours"]  # 14:30
    assert patterns["avg_confidence"] == 85.0
    assert patterns["avg_loss"] == -1166.67


def test_identify_failure_patterns_empty(optimizer: EvolutionOptimizer) -> None:
    """Test pattern identification with no failures."""
    patterns = optimizer.identify_failure_patterns([])
    assert patterns["pattern_count"] == 0
    assert patterns["patterns"] == {}


@pytest.mark.asyncio
async def test_generate_strategy_creates_file(optimizer: EvolutionOptimizer, tmp_path: Path) -> None:
    """Test that generate_strategy creates a strategy file."""
    failures = [
        {
            "decision_id": "1",
            "timestamp": "2024-01-15T09:30:00+00:00",
            "stock_code": "005930",
            "market": "KR",
            "action": "BUY",
            "confidence": 85,
            "outcome_pnl": -1000.0,
            "context_snapshot": {},
            "input_data": {},
        }
    ]

    # Mock Gemini response
    mock_response = Mock()
    mock_response.text = """
# Simple strategy
price = market_data.get("current_price", 0)
if price > 50000:
    return {"action": "BUY", "confidence": 70, "rationale": "Price above threshold"}
return {"action": "HOLD", "confidence": 50, "rationale": "Waiting"}
"""

    with patch.object(
        optimizer._client.aio.models,
        "generate_content",
        new=AsyncMock(return_value=mock_response),
    ):
        with patch("src.evolution.optimizer.STRATEGIES_DIR", tmp_path):
            strategy_path = await optimizer.generate_strategy(failures)

    assert strategy_path is not None
    assert strategy_path.exists()
    assert strategy_path.suffix == ".py"
    content = strategy_path.read_text()
    assert "class Strategy_" in content
    assert "def evaluate" in content


@pytest.mark.asyncio
async def test_generate_strategy_handles_api_error(optimizer: EvolutionOptimizer) -> None:
    """Test that generate_strategy handles Gemini API errors gracefully."""
    failures = [{"decision_id": "1", "timestamp": "2024-01-15T09:30:00+00:00"}]

    with patch.object(
        optimizer._client.aio.models,
        "generate_content",
        side_effect=Exception("API Error"),
    ):
        strategy_path = await optimizer.generate_strategy(failures)

    assert strategy_path is None


def test_get_performance_summary() -> None:
    """Test getting performance summary from trades table."""
    # Create a temporary database with trades
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as tmp:
        tmp_path = tmp.name

    conn = init_db(tmp_path)
    log_trade(conn, "005930", "BUY", 85, "Test win", quantity=10, price=70000, pnl=1000.0)
    log_trade(conn, "000660", "SELL", 90, "Test loss", quantity=5, price=100000, pnl=-500.0)
    log_trade(conn, "035420", "BUY", 80, "Test win", quantity=8, price=50000, pnl=800.0)
    conn.close()

    # Create settings with temp database path
    settings = Settings(
        KIS_APP_KEY="test_key",
        KIS_APP_SECRET="test_secret",
        KIS_ACCOUNT_NO="12345678-01",
        GEMINI_API_KEY="test_gemini_key",
        GEMINI_MODEL="gemini-pro",
        DB_PATH=tmp_path,
    )

    optimizer = EvolutionOptimizer(settings)
    summary = optimizer.get_performance_summary()

    assert summary["total_trades"] == 3
    assert summary["wins"] == 2
    assert summary["losses"] == 1
    assert summary["total_pnl"] == 1300.0
    assert summary["avg_pnl"] == 433.33

    # Clean up
    Path(tmp_path).unlink()


def test_validate_strategy_success(optimizer: EvolutionOptimizer, tmp_path: Path) -> None:
    """Test strategy validation when tests pass."""
    strategy_file = tmp_path / "test_strategy.py"
    strategy_file.write_text("# Valid strategy file")

    with patch("subprocess.run") as mock_run:
        mock_run.return_value = Mock(returncode=0, stdout="", stderr="")
        result = optimizer.validate_strategy(strategy_file)

    assert result is True
    assert strategy_file.exists()


def test_validate_strategy_failure(optimizer: EvolutionOptimizer, tmp_path: Path) -> None:
    """Test strategy validation when tests fail."""
    strategy_file = tmp_path / "test_strategy.py"
    strategy_file.write_text("# Invalid strategy file")

    with patch("subprocess.run") as mock_run:
        mock_run.return_value = Mock(returncode=1, stdout="FAILED", stderr="")
        result = optimizer.validate_strategy(strategy_file)

    assert result is False
    # File should be deleted on failure
    assert not strategy_file.exists()


# ------------------------------------------------------------------
# ABTester Tests
# ------------------------------------------------------------------


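# A minimal sketch (an assumption, not the actual src.evolution.ab_test code)
# of the comparison these tests exercise, using the scipy dependency added in
# pyproject.toml -- Welch's t-test on the two P&L samples:
#
#     from scipy import stats
#     t_stat, p_value = stats.ttest_ind(pnls_a, pnls_b, equal_var=False)
#     is_significant = p_value < self.significance_level  # e.g. 0.05
#
# scipy returns numpy scalars (np.True_, np.float64), which is why a test
# below coerces is_significant with bool() before an identity assertion.
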
def test_calculate_performance_basic(ab_tester: ABTester) -> None:
    """Test basic performance calculation."""
    trades = [
        {"pnl": 1000.0},
        {"pnl": -500.0},
        {"pnl": 800.0},
        {"pnl": 200.0},
    ]

    perf = ab_tester.calculate_performance(trades, "TestStrategy")

    assert perf.strategy_name == "TestStrategy"
    assert perf.total_trades == 4
    assert perf.wins == 3
    assert perf.losses == 1
    assert perf.total_pnl == 1500.0
    assert perf.avg_pnl == 375.0
    assert perf.win_rate == 75.0
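    # Arithmetic: 3 of 4 trades are profitable, so total_pnl = 1500.0,
    # avg_pnl = 1500.0 / 4 = 375.0 and win_rate = 3 / 4 = 75.0%.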
    assert perf.sharpe_ratio is not None


def test_calculate_performance_empty(ab_tester: ABTester) -> None:
    """Test performance calculation with no trades."""
    perf = ab_tester.calculate_performance([], "EmptyStrategy")

    assert perf.total_trades == 0
    assert perf.wins == 0
    assert perf.losses == 0
    assert perf.total_pnl == 0.0
    assert perf.avg_pnl == 0.0
    assert perf.win_rate == 0.0
    assert perf.sharpe_ratio is None


def test_compare_strategies_significant_difference(ab_tester: ABTester) -> None:
    """Test strategy comparison with significant performance difference."""
    # Strategy A: consistently profitable
    trades_a = [{"pnl": 1000.0} for _ in range(30)]

    # Strategy B: consistently losing
    trades_b = [{"pnl": -500.0} for _ in range(30)]

    result = ab_tester.compare_strategies(trades_a, trades_b, "Strategy A", "Strategy B")

    # scipy returns np.True_ instead of Python bool
    assert bool(result.is_significant) is True
    assert result.winner == "Strategy A"
    assert result.p_value < 0.05
    assert result.performance_a.avg_pnl > result.performance_b.avg_pnl


def test_compare_strategies_no_difference(ab_tester: ABTester) -> None:
    """Test strategy comparison with no significant difference."""
    # Both strategies have similar performance
    trades_a = [{"pnl": 100.0}, {"pnl": -50.0}, {"pnl": 80.0}]
    trades_b = [{"pnl": 90.0}, {"pnl": -60.0}, {"pnl": 85.0}]

    result = ab_tester.compare_strategies(trades_a, trades_b, "Strategy A", "Strategy B")

    # With small samples and similar performance, likely not significant
    assert result.winner is None or not result.is_significant


def test_should_deploy_meets_criteria(ab_tester: ABTester) -> None:
    """Test deployment decision when criteria are met."""
    # Create a winning result that meets criteria
    trades_a = [{"pnl": 1000.0} for _ in range(25)]  # 100% win rate
    trades_b = [{"pnl": -500.0} for _ in range(25)]

    result = ab_tester.compare_strategies(trades_a, trades_b, "Winner", "Loser")

    should_deploy = ab_tester.should_deploy(result, min_win_rate=60.0, min_trades=20)

    assert should_deploy is True


def test_should_deploy_insufficient_trades(ab_tester: ABTester) -> None:
    """Test deployment decision with insufficient trades."""
    trades_a = [{"pnl": 1000.0} for _ in range(10)]  # Only 10 trades
    trades_b = [{"pnl": -500.0} for _ in range(10)]

    result = ab_tester.compare_strategies(trades_a, trades_b, "Winner", "Loser")

    should_deploy = ab_tester.should_deploy(result, min_win_rate=60.0, min_trades=20)

    assert should_deploy is False


def test_should_deploy_low_win_rate(ab_tester: ABTester) -> None:
    """Test deployment decision with low win rate."""
    # Mix of wins and losses, below 60% win rate
    trades_a = [{"pnl": 100.0}] * 10 + [{"pnl": -100.0}] * 15  # 40% win rate
    trades_b = [{"pnl": -500.0} for _ in range(25)]

    result = ab_tester.compare_strategies(trades_a, trades_b, "LowWinner", "Loser")

    should_deploy = ab_tester.should_deploy(result, min_win_rate=60.0, min_trades=20)

    assert should_deploy is False


def test_should_deploy_not_significant(ab_tester: ABTester) -> None:
    """Test deployment decision when the difference is not significant."""
    # Use more varied data to ensure statistical insignificance
    trades_a = [{"pnl": 100.0}, {"pnl": -50.0}] * 12 + [{"pnl": 100.0}]
    trades_b = [{"pnl": 95.0}, {"pnl": -45.0}] * 12 + [{"pnl": 95.0}]

    result = ab_tester.compare_strategies(trades_a, trades_b, "A", "B")

    should_deploy = ab_tester.should_deploy(result, min_win_rate=60.0, min_trades=20)

    # Not significant or not profitable enough. Even if significant, the win
    # rate is 13/25 = 52%, which is below the 60% threshold.
    assert should_deploy is False


# ------------------------------------------------------------------
# PerformanceTracker Tests
# ------------------------------------------------------------------


def test_get_strategy_metrics(db_conn: sqlite3.Connection) -> None:
    """Test getting strategy metrics."""
    # Add some trades
    log_trade(db_conn, "005930", "BUY", 85, "Win 1", quantity=10, price=70000, pnl=1000.0)
    log_trade(db_conn, "000660", "SELL", 90, "Loss 1", quantity=5, price=100000, pnl=-500.0)
    log_trade(db_conn, "035420", "BUY", 80, "Win 2", quantity=8, price=50000, pnl=800.0)
    log_trade(db_conn, "005930", "HOLD", 75, "Hold", quantity=0, price=70000, pnl=0.0)

    tracker = PerformanceTracker(db_path=":memory:")

    # Route the tracker's queries to the shared in-memory connection
    with patch("sqlite3.connect", return_value=db_conn):
        metrics = tracker.get_strategy_metrics()

    assert metrics.total_trades == 4
    assert metrics.wins == 2
    assert metrics.losses == 1
    assert metrics.holds == 1
    assert metrics.win_rate == 50.0
    assert metrics.total_pnl == 1300.0


def test_calculate_improvement_trend_improving(performance_tracker: PerformanceTracker) -> None:
    """Test improvement trend calculation for an improving strategy."""
    metrics = [
        StrategyMetrics(
            strategy_name="test",
            period_start="2024-01-01",
            period_end="2024-01-07",
            total_trades=10,
            wins=5,
            losses=5,
            holds=0,
            win_rate=50.0,
            avg_pnl=100.0,
            total_pnl=1000.0,
            best_trade=500.0,
            worst_trade=-300.0,
            avg_confidence=75.0,
        ),
        StrategyMetrics(
            strategy_name="test",
            period_start="2024-01-08",
            period_end="2024-01-14",
            total_trades=10,
            wins=7,
            losses=3,
            holds=0,
            win_rate=70.0,
            avg_pnl=200.0,
            total_pnl=2000.0,
            best_trade=600.0,
            worst_trade=-200.0,
            avg_confidence=80.0,
        ),
    ]

    trend = performance_tracker.calculate_improvement_trend(metrics)

    assert trend["trend"] == "improving"
    assert trend["win_rate_change"] == 20.0
    assert trend["pnl_change"] == 100.0
    assert trend["confidence_change"] == 5.0


def test_calculate_improvement_trend_declining(performance_tracker: PerformanceTracker) -> None:
    """Test improvement trend calculation for a declining strategy."""
    metrics = [
        StrategyMetrics(
            strategy_name="test",
            period_start="2024-01-01",
            period_end="2024-01-07",
            total_trades=10,
            wins=7,
            losses=3,
            holds=0,
            win_rate=70.0,
            avg_pnl=200.0,
            total_pnl=2000.0,
            best_trade=600.0,
            worst_trade=-200.0,
            avg_confidence=80.0,
        ),
        StrategyMetrics(
            strategy_name="test",
            period_start="2024-01-08",
            period_end="2024-01-14",
            total_trades=10,
            wins=4,
            losses=6,
            holds=0,
            win_rate=40.0,
            avg_pnl=-50.0,
            total_pnl=-500.0,
            best_trade=300.0,
            worst_trade=-400.0,
            avg_confidence=70.0,
        ),
    ]

    trend = performance_tracker.calculate_improvement_trend(metrics)

    assert trend["trend"] == "declining"
    assert trend["win_rate_change"] == -30.0
    assert trend["pnl_change"] == -250.0


def test_calculate_improvement_trend_insufficient_data(performance_tracker: PerformanceTracker) -> None:
    """Test improvement trend with insufficient data."""
    metrics = [
        StrategyMetrics(
            strategy_name="test",
            period_start="2024-01-01",
            period_end="2024-01-07",
            total_trades=10,
            wins=5,
            losses=5,
            holds=0,
            win_rate=50.0,
            avg_pnl=100.0,
            total_pnl=1000.0,
            best_trade=500.0,
            worst_trade=-300.0,
            avg_confidence=75.0,
        )
    ]

    trend = performance_tracker.calculate_improvement_trend(metrics)

    assert trend["trend"] == "insufficient_data"
    assert trend["win_rate_change"] == 0.0
    assert trend["pnl_change"] == 0.0


def test_export_dashboard_json(performance_tracker: PerformanceTracker) -> None:
    """Test exporting dashboard as JSON."""
    overall_metrics = StrategyMetrics(
        strategy_name="test",
        period_start="2024-01-01",
        period_end="2024-01-31",
        total_trades=100,
        wins=60,
        losses=40,
        holds=10,
        win_rate=60.0,
        avg_pnl=150.0,
        total_pnl=15000.0,
        best_trade=1000.0,
        worst_trade=-500.0,
        avg_confidence=80.0,
    )

    dashboard = PerformanceDashboard(
        generated_at=datetime.now(UTC).isoformat(),
        overall_metrics=overall_metrics,
        daily_metrics=[],
        weekly_metrics=[],
        improvement_trend={"trend": "improving", "win_rate_change": 10.0},
    )

    json_output = performance_tracker.export_dashboard_json(dashboard)

    # Verify it's valid JSON
    data = json.loads(json_output)
    assert "generated_at" in data
    assert "overall_metrics" in data
    assert data["overall_metrics"]["total_trades"] == 100
    assert data["overall_metrics"]["win_rate"] == 60.0


def test_generate_dashboard() -> None:
    """Test generating a complete dashboard."""
    # Create tracker with temp database
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as tmp:
        tmp_path = tmp.name

    # Initialize with data
    conn = init_db(tmp_path)
    log_trade(conn, "005930", "BUY", 85, "Win", quantity=10, price=70000, pnl=1000.0)
    log_trade(conn, "000660", "SELL", 90, "Loss", quantity=5, price=100000, pnl=-500.0)
    conn.close()

    tracker = PerformanceTracker(db_path=tmp_path)
    dashboard = tracker.generate_dashboard()

    assert isinstance(dashboard, PerformanceDashboard)
    assert dashboard.overall_metrics.total_trades == 2
    assert len(dashboard.daily_metrics) == 7
    assert len(dashboard.weekly_metrics) == 4
    assert "trend" in dashboard.improvement_trend

    # Clean up
    Path(tmp_path).unlink()


# ------------------------------------------------------------------
# Integration Tests
# ------------------------------------------------------------------


@pytest.mark.asyncio
async def test_full_evolution_pipeline(optimizer: EvolutionOptimizer, tmp_path: Path) -> None:
    """Test the complete evolution pipeline."""
    # Add losing decisions
    logger = optimizer._decision_logger
    id1 = logger.log_decision(
        stock_code="005930",
        market="KR",
        exchange_code="KRX",
        action="BUY",
        confidence=85,
        rationale="Expected growth",
        context_snapshot={},
        input_data={},
    )
    logger.update_outcome(id1, pnl=-2000.0, accuracy=0)

    # Mock Gemini and subprocess
    mock_response = Mock()
    mock_response.text = 'return {"action": "HOLD", "confidence": 50, "rationale": "Test"}'

    with patch.object(
        optimizer._client.aio.models,
        "generate_content",
        new=AsyncMock(return_value=mock_response),
    ):
        with patch("src.evolution.optimizer.STRATEGIES_DIR", tmp_path):
            with patch("subprocess.run") as mock_run:
                mock_run.return_value = Mock(returncode=0, stdout="", stderr="")

                result = await optimizer.evolve()

    assert result is not None
    assert "title" in result
    assert "branch" in result
    assert "status" in result
558
tests/test_latency_control.py
Normal file
@@ -0,0 +1,558 @@
"""Tests for latency control system (criticality assessment and priority queue)."""

from __future__ import annotations

import asyncio
import time

import pytest

from src.core.criticality import CriticalityAssessor, CriticalityLevel
from src.core.priority_queue import PriorityTask, PriorityTaskQueue

# ---------------------------------------------------------------------------
# CriticalityAssessor Tests
# ---------------------------------------------------------------------------


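# The tests below pin down the assessment cascade. A sketch of the ordering
# they imply (an assumption -- the real logic lives in src/core/criticality.py):
#
#     if not is_market_open:                                   -> LOW
#     elif pnl_pct <= critical_pnl_threshold
#          or abs(price_change_1m) >= critical_price_change_threshold
#          or volume_surge >= critical_volume_surge_threshold: -> CRITICAL
#     elif volatility_score >= high_volatility_threshold:      -> HIGH
#     elif volatility_score < low_volatility_threshold:        -> LOW
#     else:                                                    -> NORMAL
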
class TestCriticalityAssessor:
    """Test suite for criticality assessment logic."""

    def test_market_closed_returns_low(self) -> None:
        """Market closed should return LOW priority."""
        assessor = CriticalityAssessor()
        level = assessor.assess_market_conditions(
            pnl_pct=0.0,
            volatility_score=50.0,
            volume_surge=1.0,
            is_market_open=False,
        )
        assert level == CriticalityLevel.LOW

    def test_very_low_volatility_returns_low(self) -> None:
        """Very low volatility should return LOW priority."""
        assessor = CriticalityAssessor()
        level = assessor.assess_market_conditions(
            pnl_pct=0.0,
            volatility_score=20.0,  # Below 30.0 threshold
            volume_surge=1.0,
            is_market_open=True,
        )
        assert level == CriticalityLevel.LOW

    def test_critical_pnl_threshold_triggered(self) -> None:
        """P&L below -2.5% should trigger CRITICAL."""
        assessor = CriticalityAssessor()
        level = assessor.assess_market_conditions(
            pnl_pct=-2.6,  # Below -2.5% threshold
            volatility_score=50.0,
            volume_surge=1.0,
            is_market_open=True,
        )
        assert level == CriticalityLevel.CRITICAL

    def test_critical_pnl_at_circuit_breaker_proximity(self) -> None:
        """P&L at exactly -2.5% (near the -3.0% breaker) should be CRITICAL."""
        assessor = CriticalityAssessor()
        level = assessor.assess_market_conditions(
            pnl_pct=-2.5,
            volatility_score=50.0,
            volume_surge=1.0,
            is_market_open=True,
        )
        assert level == CriticalityLevel.CRITICAL

    def test_critical_price_change_positive(self) -> None:
        """Large positive price change (>5%) should trigger CRITICAL."""
        assessor = CriticalityAssessor()
        level = assessor.assess_market_conditions(
            pnl_pct=0.0,
            volatility_score=50.0,
            volume_surge=1.0,
            price_change_1m=5.5,  # Above 5.0% threshold
            is_market_open=True,
        )
        assert level == CriticalityLevel.CRITICAL

    def test_critical_price_change_negative(self) -> None:
        """Large negative price change (<-5%) should trigger CRITICAL."""
        assessor = CriticalityAssessor()
        level = assessor.assess_market_conditions(
            pnl_pct=0.0,
            volatility_score=50.0,
            volume_surge=1.0,
            price_change_1m=-6.0,  # Below -5.0% threshold
            is_market_open=True,
        )
        assert level == CriticalityLevel.CRITICAL

    def test_critical_volume_surge(self) -> None:
        """Extreme volume surge (>10x) should trigger CRITICAL."""
        assessor = CriticalityAssessor()
        level = assessor.assess_market_conditions(
            pnl_pct=0.0,
            volatility_score=50.0,
            volume_surge=12.0,  # Above 10.0x threshold
            is_market_open=True,
        )
        assert level == CriticalityLevel.CRITICAL

    def test_high_volatility_returns_high(self) -> None:
        """High volatility score should return HIGH priority."""
        assessor = CriticalityAssessor()
        level = assessor.assess_market_conditions(
            pnl_pct=0.0,
            volatility_score=75.0,  # Above 70.0 threshold
            volume_surge=1.0,
            is_market_open=True,
        )
        assert level == CriticalityLevel.HIGH

    def test_normal_conditions_return_normal(self) -> None:
        """Normal market conditions should return NORMAL priority."""
        assessor = CriticalityAssessor()
        level = assessor.assess_market_conditions(
            pnl_pct=0.5,
            volatility_score=50.0,  # Between 30 and 70
            volume_surge=1.5,
            price_change_1m=1.0,
            is_market_open=True,
        )
        assert level == CriticalityLevel.NORMAL

    def test_custom_thresholds(self) -> None:
        """Custom thresholds should be respected."""
        assessor = CriticalityAssessor(
            critical_pnl_threshold=-1.0,
            critical_price_change_threshold=3.0,
            critical_volume_surge_threshold=5.0,
            high_volatility_threshold=60.0,
            low_volatility_threshold=20.0,
        )

        # Test custom P&L threshold
        level = assessor.assess_market_conditions(
            pnl_pct=-1.1,
            volatility_score=50.0,
            volume_surge=1.0,
            is_market_open=True,
        )
        assert level == CriticalityLevel.CRITICAL

        # Test custom price change threshold
        level = assessor.assess_market_conditions(
            pnl_pct=0.0,
            volatility_score=50.0,
            volume_surge=1.0,
            price_change_1m=3.5,
            is_market_open=True,
        )
        assert level == CriticalityLevel.CRITICAL

    def test_get_timeout_returns_correct_values(self) -> None:
        """Timeout values should match specification."""
        assessor = CriticalityAssessor()

        assert assessor.get_timeout(CriticalityLevel.CRITICAL) == 5.0
        assert assessor.get_timeout(CriticalityLevel.HIGH) == 30.0
        assert assessor.get_timeout(CriticalityLevel.NORMAL) == 60.0
        assert assessor.get_timeout(CriticalityLevel.LOW) is None
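
    # The values above suggest a simple level -> seconds table inside
    # CriticalityAssessor (a sketch, not the actual implementation):
    #
    #     _TIMEOUTS: dict[CriticalityLevel, float | None] = {
    #         CriticalityLevel.CRITICAL: 5.0,
    #         CriticalityLevel.HIGH: 30.0,
    #         CriticalityLevel.NORMAL: 60.0,
    #         CriticalityLevel.LOW: None,
    #     }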


# ---------------------------------------------------------------------------
# PriorityTaskQueue Tests
# ---------------------------------------------------------------------------

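# A rough sketch of the task structure these tests assume (an assumption, not
# the actual src/core/priority_queue.py): ordering by (priority, timestamp)
# lets a heap pop the lowest priority number first and fall back to FIFO
# within a level:
#
#     @dataclass(order=True)
#     class PriorityTask:
#         priority: int
#         timestamp: float
#         task_id: str = field(compare=False)
#         task_data: dict = field(compare=False)
#         callback: Callable[[], Awaitable[object]] | None = field(
#             default=None, compare=False
#         )
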
class TestPriorityTaskQueue:
    """Test suite for priority queue implementation."""

    @pytest.mark.asyncio
    async def test_enqueue_task(self) -> None:
        """Tasks should be enqueued successfully."""
        queue = PriorityTaskQueue()

        success = await queue.enqueue(
            task_id="test-1",
            criticality=CriticalityLevel.NORMAL,
            task_data={"action": "test"},
        )

        assert success is True
        assert await queue.size() == 1

    @pytest.mark.asyncio
    async def test_enqueue_rejects_when_full(self) -> None:
        """Queue should reject tasks when full."""
        queue = PriorityTaskQueue(max_size=2)

        # Fill the queue
        await queue.enqueue("task-1", CriticalityLevel.NORMAL, {})
        await queue.enqueue("task-2", CriticalityLevel.NORMAL, {})

        # Third task should be rejected
        success = await queue.enqueue("task-3", CriticalityLevel.NORMAL, {})
        assert success is False
        assert await queue.size() == 2

    @pytest.mark.asyncio
    async def test_dequeue_returns_highest_priority(self) -> None:
        """Dequeue should return the highest priority task first."""
        queue = PriorityTaskQueue()

        # Enqueue tasks in reverse priority order
        await queue.enqueue("low", CriticalityLevel.LOW, {"priority": 3})
        await queue.enqueue("normal", CriticalityLevel.NORMAL, {"priority": 2})
        await queue.enqueue("high", CriticalityLevel.HIGH, {"priority": 1})
        await queue.enqueue("critical", CriticalityLevel.CRITICAL, {"priority": 0})

        # Dequeue should return CRITICAL first
        task = await queue.dequeue(timeout=1.0)
        assert task is not None
        assert task.task_id == "critical"
        assert task.priority == 0

        # Then HIGH
        task = await queue.dequeue(timeout=1.0)
        assert task is not None
        assert task.task_id == "high"
        assert task.priority == 1

    @pytest.mark.asyncio
    async def test_dequeue_fifo_within_same_priority(self) -> None:
        """Tasks with the same priority should be FIFO."""
        queue = PriorityTaskQueue()

        # Enqueue multiple tasks with same priority
        await queue.enqueue("task-1", CriticalityLevel.NORMAL, {})
        await asyncio.sleep(0.01)  # Small delay to ensure different timestamps
        await queue.enqueue("task-2", CriticalityLevel.NORMAL, {})
        await asyncio.sleep(0.01)
        await queue.enqueue("task-3", CriticalityLevel.NORMAL, {})

        # Should dequeue in FIFO order
        task1 = await queue.dequeue(timeout=1.0)
        task2 = await queue.dequeue(timeout=1.0)
        task3 = await queue.dequeue(timeout=1.0)

        assert task1 is not None and task1.task_id == "task-1"
        assert task2 is not None and task2.task_id == "task-2"
        assert task3 is not None and task3.task_id == "task-3"

    @pytest.mark.asyncio
    async def test_dequeue_returns_none_when_empty(self) -> None:
        """Dequeue should return None when the queue is empty after the timeout."""
        queue = PriorityTaskQueue()

        task = await queue.dequeue(timeout=0.1)
        assert task is None

    @pytest.mark.asyncio
    async def test_execute_with_timeout_success(self) -> None:
        """Task execution should succeed within the timeout."""
        queue = PriorityTaskQueue()

        # Create a simple async callback
        async def test_callback() -> str:
            await asyncio.sleep(0.01)
            return "success"

        task = PriorityTask(
            priority=0,
            timestamp=0.0,
            task_id="test",
            task_data={},
            callback=test_callback,
        )

        result = await queue.execute_with_timeout(task, timeout=1.0)
        assert result == "success"

    @pytest.mark.asyncio
    async def test_execute_with_timeout_raises_timeout_error(self) -> None:
        """Task execution should raise TimeoutError if it exceeds the timeout."""
        queue = PriorityTaskQueue()

        # Create a slow async callback
        async def slow_callback() -> str:
            await asyncio.sleep(1.0)
            return "too slow"

        task = PriorityTask(
            priority=0,
            timestamp=0.0,
            task_id="test",
            task_data={},
            callback=slow_callback,
        )

        with pytest.raises(asyncio.TimeoutError):
            await queue.execute_with_timeout(task, timeout=0.1)

    @pytest.mark.asyncio
    async def test_execute_with_timeout_propagates_exceptions(self) -> None:
        """Task execution should propagate exceptions from the callback."""
        queue = PriorityTaskQueue()

        # Create a failing async callback
        async def failing_callback() -> None:
            raise ValueError("Test error")

        task = PriorityTask(
            priority=0,
            timestamp=0.0,
            task_id="test",
            task_data={},
            callback=failing_callback,
        )

        with pytest.raises(ValueError, match="Test error"):
            await queue.execute_with_timeout(task, timeout=1.0)

    @pytest.mark.asyncio
    async def test_execute_without_timeout(self) -> None:
        """Task execution should work without a timeout (LOW priority)."""
        queue = PriorityTaskQueue()

        async def test_callback() -> str:
            await asyncio.sleep(0.01)
            return "success"

        task = PriorityTask(
            priority=3,
            timestamp=0.0,
            task_id="test",
            task_data={},
            callback=test_callback,
        )

        result = await queue.execute_with_timeout(task, timeout=None)
        assert result == "success"

    @pytest.mark.asyncio
    async def test_get_metrics(self) -> None:
        """Queue should track metrics correctly."""
        queue = PriorityTaskQueue()

        # Enqueue and dequeue some tasks
        await queue.enqueue("task-1", CriticalityLevel.CRITICAL, {})
        await queue.enqueue("task-2", CriticalityLevel.HIGH, {})
        await queue.enqueue("task-3", CriticalityLevel.NORMAL, {})

        await queue.dequeue(timeout=1.0)
        await queue.dequeue(timeout=1.0)

        metrics = await queue.get_metrics()

        assert metrics.total_enqueued == 3
        assert metrics.total_dequeued == 2
        assert metrics.current_size == 1

    @pytest.mark.asyncio
    async def test_wait_time_metrics(self) -> None:
        """Queue should track wait times per criticality level."""
        queue = PriorityTaskQueue()

        # Enqueue tasks with different criticality
        await queue.enqueue("critical-1", CriticalityLevel.CRITICAL, {})
        await asyncio.sleep(0.05)  # Add some wait time

        await queue.dequeue(timeout=1.0)

        metrics = await queue.get_metrics()

        # Should have wait time metrics for CRITICAL
        assert CriticalityLevel.CRITICAL in metrics.avg_wait_time
        assert metrics.avg_wait_time[CriticalityLevel.CRITICAL] > 0.0

    @pytest.mark.asyncio
    async def test_clear_queue(self) -> None:
        """Clear should remove all tasks from the queue."""
        queue = PriorityTaskQueue()

        await queue.enqueue("task-1", CriticalityLevel.NORMAL, {})
        await queue.enqueue("task-2", CriticalityLevel.NORMAL, {})
        await queue.enqueue("task-3", CriticalityLevel.NORMAL, {})

        cleared = await queue.clear()

        assert cleared == 3
        assert await queue.size() == 0

    @pytest.mark.asyncio
    async def test_concurrent_enqueue_dequeue(self) -> None:
        """Queue should handle concurrent operations safely."""
        queue = PriorityTaskQueue()

        # Concurrent enqueue operations
        async def enqueue_tasks() -> None:
            for i in range(10):
                await queue.enqueue(
                    f"task-{i}",
                    CriticalityLevel.NORMAL,
                    {"index": i},
                )

        # Concurrent dequeue operations
        async def dequeue_tasks() -> list[str]:
            tasks = []
            for _ in range(10):
                task = await queue.dequeue(timeout=1.0)
                if task:
                    tasks.append(task.task_id)
                await asyncio.sleep(0.01)
            return tasks

        # Run both concurrently
        enqueue_task = asyncio.create_task(enqueue_tasks())
        dequeue_task = asyncio.create_task(dequeue_tasks())

        await enqueue_task
        dequeued_ids = await dequeue_task

        # All tasks should be processed
        assert len(dequeued_ids) == 10
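
    # Clean interleaving here relies on the queue serializing heap access
    # (e.g. with an asyncio.Lock) -- an assumption about the design, not
    # something this test asserts directly.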

    @pytest.mark.asyncio
    async def test_timeout_metric_tracking(self) -> None:
        """Queue should track timeout occurrences."""
        queue = PriorityTaskQueue()

        async def slow_callback() -> str:
            await asyncio.sleep(1.0)
            return "too slow"

        task = PriorityTask(
            priority=0,
            timestamp=0.0,
            task_id="test",
            task_data={},
            callback=slow_callback,
        )

        with pytest.raises(asyncio.TimeoutError):
            await queue.execute_with_timeout(task, timeout=0.1)

        metrics = await queue.get_metrics()
        assert metrics.total_timeouts == 1

    @pytest.mark.asyncio
    async def test_error_metric_tracking(self) -> None:
        """Queue should track execution errors."""
        queue = PriorityTaskQueue()

        async def failing_callback() -> None:
            raise ValueError("Test error")

        task = PriorityTask(
            priority=0,
            timestamp=0.0,
            task_id="test",
            task_data={},
            callback=failing_callback,
        )

        with pytest.raises(ValueError, match="Test error"):
            await queue.execute_with_timeout(task, timeout=1.0)

        metrics = await queue.get_metrics()
        assert metrics.total_errors == 1


# ---------------------------------------------------------------------------
# Integration Tests
# ---------------------------------------------------------------------------


class TestLatencyControlIntegration:
    """Integration tests for criticality assessment and priority queue."""

    @pytest.mark.asyncio
    async def test_critical_task_bypass_queue(self) -> None:
        """CRITICAL tasks should bypass lower priority tasks."""
        queue = PriorityTaskQueue()

        # Add normal priority tasks
        await queue.enqueue("normal-1", CriticalityLevel.NORMAL, {})
        await queue.enqueue("normal-2", CriticalityLevel.NORMAL, {})

        # Add critical task (should jump to front)
        await queue.enqueue("critical", CriticalityLevel.CRITICAL, {})

        # Dequeue should return critical first
        task = await queue.dequeue(timeout=1.0)
        assert task is not None
        assert task.task_id == "critical"

    @pytest.mark.asyncio
    async def test_timeout_enforcement_by_criticality(self) -> None:
        """Timeout enforcement should match criticality level."""
        assessor = CriticalityAssessor()

        # CRITICAL should have a 5s timeout
        critical_timeout = assessor.get_timeout(CriticalityLevel.CRITICAL)
        assert critical_timeout == 5.0

        # HIGH should have a 30s timeout
        high_timeout = assessor.get_timeout(CriticalityLevel.HIGH)
        assert high_timeout == 30.0

        # NORMAL should have a 60s timeout
        normal_timeout = assessor.get_timeout(CriticalityLevel.NORMAL)
        assert normal_timeout == 60.0

        # LOW should have no timeout
        low_timeout = assessor.get_timeout(CriticalityLevel.LOW)
        assert low_timeout is None

    @pytest.mark.asyncio
    async def test_fast_path_execution_for_critical(self) -> None:
        """CRITICAL tasks should complete quickly."""
        queue = PriorityTaskQueue()

        # Create a fast callback simulating fast-path execution
        async def fast_path_callback() -> str:
            # Simulate simplified decision flow
            await asyncio.sleep(0.01)  # Very fast execution
            return "fast_path_complete"

        task = PriorityTask(
            priority=0,  # CRITICAL
            timestamp=0.0,
            task_id="critical-fast",
            task_data={},
            callback=fast_path_callback,
        )

        start = time.time()
        result = await queue.execute_with_timeout(task, timeout=5.0)
        elapsed = time.time() - start

        assert result == "fast_path_complete"
        assert elapsed < 5.0  # Should complete well under the CRITICAL timeout

    @pytest.mark.asyncio
    async def test_graceful_degradation_when_queue_full(self) -> None:
        """System should gracefully handle a full queue."""
        queue = PriorityTaskQueue(max_size=2)

        # Fill the queue
        await queue.enqueue("task-1", CriticalityLevel.NORMAL, {})
        await queue.enqueue("task-2", CriticalityLevel.NORMAL, {})

        # Try to add more tasks
        success = await queue.enqueue("task-3", CriticalityLevel.NORMAL, {})
        assert success is False

        # Queue should still function
        task = await queue.dequeue(timeout=1.0)
        assert task is not None

        # Now we can add another task
        success = await queue.enqueue("task-4", CriticalityLevel.NORMAL, {})
        assert success is True