Spaces:
Running
Running
Update LLM.py
Browse files
LLM.py
CHANGED
|
@@ -1,615 +1,461 @@
|
|
| 1 |
-
# LLM.py (
|
| 2 |
-
import os, traceback,
|
| 3 |
-
import
|
| 4 |
from datetime import datetime
|
| 5 |
-
from
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
from
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
except Exception as e: return f"Error formatting chart data: {str(e)}"
|
| 30 |
-
def _format_raw_candle_data(self, candles, timeframe):
|
| 31 |
-
try:
|
| 32 |
-
if len(candles) < 10: return f"Only {len(candles)} candles available - insufficient for deep pattern analysis"
|
| 33 |
-
analysis_candles = candles[-50:] if len(candles) > 50 else candles
|
| 34 |
-
summary = []; summary.append(f"Total candles: {len(candles)} (showing last {len(analysis_candles)})"); summary.append("Recent candles (newest to oldest):")
|
| 35 |
-
for i in range(min(15, len(analysis_candles))):
|
| 36 |
-
idx = len(analysis_candles) - 1 - i; candle = analysis_candles[idx]
|
| 37 |
-
try: timestamp = datetime.fromtimestamp(candle[0] / 1000).strftime('%Y-%m-%d %H:%M:%S')
|
| 38 |
-
except: timestamp = "unknown"
|
| 39 |
-
open_price, high, low, close, volume = candle[1], candle[2], candle[3], candle[4], candle[5]
|
| 40 |
-
candle_type = "🟢 BULLISH" if close > open_price else "🔴 BEARISH" if close < open_price else "⚪ NEUTRAL"
|
| 41 |
-
body_size = abs(close - open_price); body_percent = (body_size / open_price * 100) if open_price > 0 else 0
|
| 42 |
-
wick_upper = high - max(open_price, close); wick_lower = min(open_price, close) - low; total_range = high - low
|
| 43 |
-
if total_range > 0: body_ratio = (body_size / total_range) * 100; upper_wick_ratio = (wick_upper / total_range) * 100; lower_wick_ratio = (wick_lower / total_range) * 100
|
| 44 |
-
else: body_ratio = upper_wick_ratio = lower_wick_ratio = 0
|
| 45 |
-
summary.append(f"{i+1:2d}. {timestamp} | {candle_type}"); summary.append(f" O:{open_price:.8f} H:{high:.8f} L:{low:.8f} C:{close:.8f}"); summary.append(f" Body: {body_percent:.2f}% | Body/Range: {body_ratio:.1f}%"); summary.append(f" Wicks: Upper {upper_wick_ratio:.1f}% / Lower {lower_wick_ratio:.1f}%"); summary.append(f" Volume: {volume:,.0f}")
|
| 46 |
-
if len(analysis_candles) >= 20:
|
| 47 |
-
stats = self._calculate_candle_statistics(analysis_candles)
|
| 48 |
-
summary.append(f"\n📊 STATISTICAL ANALYSIS:"); summary.append(f"• Price Change: {stats['price_change']:+.2f}%"); summary.append(f"• Average Body Size: {stats['avg_body']:.4f}%"); summary.append(f"• Volatility (ATR): {stats['atr']:.6f}"); summary.append(f"• Trend: {stats['trend']}"); summary.append(f"• Support: {stats['support']:.6f}"); summary.append(f"• Resistance: {stats['resistance']:.6f}")
|
| 49 |
-
return "\n".join(summary)
|
| 50 |
-
except Exception as e: return f"Error formatting raw candle data: {str(e)}"
|
| 51 |
-
def _calculate_candle_statistics(self, candles):
|
| 52 |
-
try:
|
| 53 |
-
closes = [c[4] for c in candles]; opens = [c[1] for c in candles]; highs = [c[2] for c in candles]; lows = [c[3] for c in candles]
|
| 54 |
-
first_close = closes[0]; last_close = closes[-1]; price_change = ((last_close - first_close) / first_close) * 100
|
| 55 |
-
body_sizes = [abs(close - open) for open, close in zip(opens, closes)]; avg_body = (sum(body_sizes) / len(body_sizes)) / first_close * 100 if first_close > 0 else 0
|
| 56 |
-
true_ranges = [];
|
| 57 |
-
for i in range(1, len(candles)): high, low, prev_close = highs[i], lows[i], closes[i-1]; tr1 = high - low; tr2 = abs(high - prev_close); tr3 = abs(low - prev_close); true_ranges.append(max(tr1, tr2, tr3))
|
| 58 |
-
atr = sum(true_ranges) / len(true_ranges) if true_ranges else 0
|
| 59 |
-
if price_change > 3: trend = "STRONG UPTREND"
|
| 60 |
-
elif price_change > 1: trend = "UPTREND"
|
| 61 |
-
elif price_change < -3: trend = "STRONG DOWNTREND"
|
| 62 |
-
elif price_change < -1: trend = "DOWNTREND"
|
| 63 |
-
else: trend = "SIDEWAYS"
|
| 64 |
-
support = min(lows); resistance = max(highs)
|
| 65 |
-
return {'price_change': price_change, 'avg_body': avg_body, 'atr': atr, 'trend': trend, 'support': support, 'resistance': resistance}
|
| 66 |
-
except Exception as e: return {'price_change': 0, 'avg_body': 0, 'atr': 0, 'trend': 'UNKNOWN', 'support': 0, 'resistance': 0}
|
| 67 |
-
async def analyze_chart_patterns(self, symbol, ohlcv_data): pass
|
| 68 |
-
def _parse_pattern_response(self, response_text): pass
|
| 69 |
-
|
| 70 |
|
| 71 |
class LLMService:
|
| 72 |
-
def __init__(self
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
self.
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
self.
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 91 |
try:
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 102 |
|
| 103 |
-
|
| 104 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
)
|
| 111 |
-
|
| 112 |
-
prompt = self._create_comprehensive_sentry_prompt(
|
| 113 |
-
data_payload, news_text, None, whale_data,
|
| 114 |
-
statistical_feedback, active_context_playbook
|
| 115 |
-
)
|
| 116 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 117 |
if self.r2_service:
|
| 118 |
-
|
| 119 |
-
await self.r2_service.save_llm_prompts_async(
|
| 120 |
-
symbol, 'sentry_watchlist_decision_v5', prompt, analysis_data
|
| 121 |
-
)
|
| 122 |
|
| 123 |
-
|
| 124 |
-
|
| 125 |
|
| 126 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 127 |
|
| 128 |
-
|
| 129 |
-
if decision_dict.get('action') == 'WATCH' and 'strategy_to_watch' not in decision_dict:
|
| 130 |
-
print(f" ⚠️ LLM {symbol}: Action is WATCH but strategy_to_watch is missing. Forcing HOLD.")
|
| 131 |
-
decision_dict['action'] = 'HOLD'
|
| 132 |
-
|
| 133 |
-
decision_dict['model_source'] = self.model_name
|
| 134 |
-
decision_dict['whale_data_integrated'] = whale_data.get('data_available', False)
|
| 135 |
-
return decision_dict
|
| 136 |
-
else:
|
| 137 |
-
print(f"❌ LLM parsing failed for {symbol} - no fallback decisions")
|
| 138 |
-
return None
|
| 139 |
|
| 140 |
except Exception as e:
|
| 141 |
-
print(f"❌
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 145 |
try:
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
return None
|
| 155 |
-
|
| 156 |
-
if fallback_strategy == "reflection" or fallback_strategy == "distillation":
|
| 157 |
-
return decision_data
|
| 158 |
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
missing = [f for f in required_fields if f not in decision_data]
|
| 169 |
-
print(f" MIA: {missing}")
|
| 170 |
-
return None
|
| 171 |
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
decision_data['strategy_to_watch'] = decision_data.get('strategy', fallback_strategy)
|
| 179 |
-
else:
|
| 180 |
-
print(f"⚠️ LLM suggested unsupported action ({action}) for {symbol}. Forcing HOLD.")
|
| 181 |
-
decision_data['action'] = 'HOLD'
|
| 182 |
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
decision_data['trade_type'] = None
|
| 187 |
|
| 188 |
-
#
|
| 189 |
-
|
| 190 |
-
if not strategy_value or strategy_value == 'unknown':
|
| 191 |
-
decision_data['strategy'] = fallback_strategy
|
| 192 |
-
if decision_data.get('action') == 'WATCH':
|
| 193 |
-
decision_data['strategy_to_watch'] = fallback_strategy
|
| 194 |
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 199 |
|
| 200 |
-
async def _get_pattern_analysis(self, data_payload):
|
| 201 |
-
try:
|
| 202 |
-
symbol = data_payload['symbol']
|
| 203 |
-
ohlcv_data = data_payload.get('raw_ohlcv') or data_payload.get('ohlcv')
|
| 204 |
-
if ohlcv_data: return None
|
| 205 |
-
return None
|
| 206 |
except Exception as e:
|
| 207 |
-
print(f"❌
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 219 |
|
| 220 |
-
|
| 221 |
-
|
|
|
|
|
|
|
|
|
|
| 222 |
|
| 223 |
-
|
| 224 |
-
|
|
|
|
|
|
|
|
|
|
| 225 |
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
target_strategy = payload.get('target_strategy', 'GENERIC')
|
| 232 |
-
enhanced_final_score = payload.get('enhanced_final_score', 0)
|
| 233 |
-
enhanced_score_display = f"{enhanced_final_score:.3f}" if isinstance(enhanced_final_score, (int, float)) else str(enhanced_final_score)
|
| 234 |
-
|
| 235 |
-
indicators_summary = format_technical_indicators(advanced_indicators)
|
| 236 |
-
strategies_summary = format_strategy_scores(strategy_scores, recommended_strategy)
|
| 237 |
-
whale_analysis_section = format_whale_analysis_for_llm(whale_data)
|
| 238 |
-
ohlcv_data = payload.get('raw_ohlcv') or payload.get('ohlcv', {})
|
| 239 |
-
candle_data_section = self._format_candle_data_comprehensive(ohlcv_data)
|
| 240 |
-
market_context_section = self._format_market_context(sentiment_data)
|
| 241 |
|
| 242 |
-
|
| 243 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 244 |
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
*
|
| 271 |
-
*
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
{
|
| 278 |
-
{
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
{
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
{candle_data_section}
|
| 286 |
-
{chr(10)}--- END OF CANDLE DATA ---{chr(10)}
|
| 287 |
-
|
| 288 |
-
🎯 STRATEGY ANALYSIS (System's recommendation based on various factors):
|
| 289 |
-
{strategies_summary}
|
| 290 |
-
|
| 291 |
-
🐋 WHALE ACTIVITY ANALYSIS:
|
| 292 |
-
{whale_analysis_section}
|
| 293 |
-
|
| 294 |
-
🌍 MARKET CONTEXT:
|
| 295 |
-
{market_context_section if market_context_section and "No market context" not in market_context_section else "Market context data not available for this analysis."}
|
| 296 |
-
|
| 297 |
-
---
|
| 298 |
-
🎯 SENTRY DECISION INSTRUCTIONS (WATCH or HOLD):
|
| 299 |
-
|
| 300 |
-
1. **PERFORM CHART PATTERN ANALYSIS:** Based *ONLY* on the provided 'RAW CANDLE DATA SUMMARY & STATISTICS', identify relevant patterns.
|
| 301 |
-
2. **[CRITICAL] INVESTIGATE THE STRATEGIC ALERT (if present):**
|
| 302 |
-
* ** للتحقق من الاستمرارية (Continuation):** هل الارتفاع مدعوم بـ 'volume_ratio' عالي (موجود في المؤشرات)؟ هل هو اختراق واضح لنمط تجميعي (مثل 'Bull Flag' أو 'Consolidation Breakout')؟ هل الشموع قوية (أجسام كبيرة)؟
|
| 303 |
-
* ** للتحقق من الإرهاق (Exhaustion):** هل ترى 'Bearish Divergence' (السعر يصنع قمة جديدة بينما RSI/MACD لا يفعل)؟ هل يضعف 'volume_ratio' مع الصعود؟ هل تظهر شموع انعكاسية (Doji, Shooting Star) على 4H/1D؟
|
| 304 |
-
3. **INTEGRATE ALL DATA:** ادمج 'تحقيقك' مع باقي البيانات (Learning Hub, Whale Activity).
|
| 305 |
-
4. **DECIDE ACTION (WATCH or HOLD):**
|
| 306 |
-
* **WATCH:** فقط إذا أكد تحقيقك أنها 'Sustainable Continuation' ولديك ثقة عالية (>= 0.75).
|
| 307 |
-
* **HOLD:** إذا أظهر تحقيقك أنها 'Exhaustion Trap'، أو إذا كان الوضع غير واضح ومحفوف بالمخاطر.
|
| 308 |
-
5. **DEFINE STRATEGY:** If (and only if) action is 'WATCH', you MUST specify which strategy the Sentry should use (e.g., 'breakout_momentum', 'mean_reversion').
|
| 309 |
-
6. **SELF-CRITIQUE:** Justify your decision. Why is this strong enough for the Sentry?
|
| 310 |
-
7. **[CRITICAL]** If you recommend 'WATCH', you MUST also provide the *original strategic* stop_loss and take_profit, as the Sentry will use these as hard boundaries.
|
| 311 |
-
|
| 312 |
-
OUTPUT FORMAT (JSON - SENTRY DECISION):
|
| 313 |
-
{{
|
| 314 |
-
"action": "WATCH/HOLD",
|
| 315 |
-
"reasoning": "Detailed explanation integrating ALL data sources. If the Strategic Alert was present, *explicitly state your investigation findings* (e.g., 'I confirmed this is continuation because volume is increasing and a bull flag is forming on 1H...')",
|
| 316 |
-
"pattern_identified_by_llm": "Name of the primary pattern(s) identified (e.g., 'Bull Flag on 1H', 'No Clear Pattern')",
|
| 317 |
-
|
| 318 |
-
"confidence_level": 0.85,
|
| 319 |
-
"strategy_to_watch": "breakout_momentum",
|
| 320 |
-
|
| 321 |
-
"stop_loss": 0.000000,
|
| 322 |
-
"take_profit": 0.000000,
|
| 323 |
-
"exit_profile": "ATR_TRAILING",
|
| 324 |
-
"exit_parameters": {{ "atr_multiplier": 2.0, "atr_period": 14, "break_even_trigger_percent": 1.5 }},
|
| 325 |
-
|
| 326 |
-
"self_critique": {{
|
| 327 |
-
"failure_modes": [
|
| 328 |
-
"What is the first reason this 'WATCH' decision could fail? (e.g., 'The identified pattern is a false breakout.')",
|
| 329 |
-
"What is the second reason? (e.g., 'The Sentry might enter too late.')"
|
| 330 |
-
],
|
| 331 |
-
"confidence_adjustment_reason": "Brief reason if confidence was adjusted post-critique."
|
| 332 |
-
}}
|
| 333 |
-
}}
|
| 334 |
"""
|
| 335 |
-
return prompt
|
| 336 |
-
|
| 337 |
-
def _format_candle_data_comprehensive(self, ohlcv_data):
|
| 338 |
-
if not ohlcv_data: return "No raw candle data available for analysis"
|
| 339 |
-
try:
|
| 340 |
-
timeframes_available = []; total_candles = 0
|
| 341 |
-
for timeframe, candles in ohlcv_data.items():
|
| 342 |
-
if candles and len(candles) >= 5: timeframes_available.append(f"{timeframe.upper()} ({len(candles)} candles)"); total_candles += len(candles)
|
| 343 |
-
if not timeframes_available: return "Insufficient candle data across all timeframes"
|
| 344 |
-
summary = f"📊 Available Timeframes: {', '.join(timeframes_available)}\n"
|
| 345 |
-
summary += f"📈 Total Candles Available: {total_candles}\n\n"
|
| 346 |
-
raw_candle_analysis_text = self.pattern_engine._format_chart_data_for_llm(ohlcv_data)
|
| 347 |
-
summary += raw_candle_analysis_text
|
| 348 |
-
return summary
|
| 349 |
-
except Exception as e: return f"Error formatting raw candle data: {str(e)}"
|
| 350 |
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
|
| 365 |
-
|
| 366 |
-
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
|
| 371 |
-
|
| 372 |
-
|
| 373 |
-
|
| 374 |
-
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
|
| 378 |
-
if self.learning_hub and self.learning_hub.initialized:
|
| 379 |
-
statistical_feedback = await self.learning_hub.get_statistical_feedback_for_llm(original_strategy)
|
| 380 |
-
active_context_playbook = await self.learning_hub.get_active_context_for_llm(
|
| 381 |
-
domain="strategy", query=f"{original_strategy} {symbol} re-analysis"
|
| 382 |
-
)
|
| 383 |
-
|
| 384 |
-
prompt = self._create_re_analysis_prompt(
|
| 385 |
-
trade_data, processed_data, news_text, pattern_analysis,
|
| 386 |
-
whale_data, statistical_feedback, active_context_playbook
|
| 387 |
-
)
|
| 388 |
-
|
| 389 |
-
if self.r2_service:
|
| 390 |
-
analysis_data = { 'symbol': symbol, 'original_strategy': original_strategy }
|
| 391 |
-
await self.r2_service.save_llm_prompts_async(
|
| 392 |
-
symbol, 'trade_reanalysis_v5_hub', prompt, analysis_data
|
| 393 |
-
)
|
| 394 |
-
|
| 395 |
-
async with self.semaphore:
|
| 396 |
-
response = await self._call_llm(prompt)
|
| 397 |
|
| 398 |
-
# 🔴 --- START OF CHANGE (V5.4) --- 🔴
|
| 399 |
-
# (تمرير كائن الصفقة الأصلي بالكامل لنسخ القيم القديمة إذا لزم الأمر)
|
| 400 |
-
re_analysis_dict = self._parse_re_analysis_response(response, original_strategy, symbol, trade_data)
|
| 401 |
-
# 🔴 --- END OF CHANGE --- 🔴
|
| 402 |
-
|
| 403 |
-
if re_analysis_dict:
|
| 404 |
-
re_analysis_dict['model_source'] = self.model_name
|
| 405 |
-
return re_analysis_dict
|
| 406 |
-
else:
|
| 407 |
-
print(f"❌ LLM re-analysis parsing failed for {symbol}")
|
| 408 |
-
return None
|
| 409 |
-
except Exception as e:
|
| 410 |
-
print(f"❌ Error in LLM re-analysis: {e}"); traceback.print_exc()
|
| 411 |
-
return None
|
| 412 |
|
| 413 |
-
|
| 414 |
-
|
| 415 |
-
|
| 416 |
-
|
| 417 |
-
|
| 418 |
-
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
|
| 422 |
-
action = decision_data.get('action')
|
| 423 |
-
if action not in ['HOLD', 'CLOSE_TRADE', 'UPDATE_TRADE']:
|
| 424 |
-
print(f"⚠️ LLM suggested unsupported re-analysis action ({action}) for {symbol}. Forcing HOLD.")
|
| 425 |
-
decision_data['action'] = 'HOLD'
|
| 426 |
-
|
| 427 |
-
# (منطق التحقق الجديد V5.4)
|
| 428 |
-
if action == 'UPDATE_TRADE':
|
| 429 |
-
required_update_fields = ['new_stop_loss', 'new_take_profit', 'new_exit_profile', 'new_exit_parameters']
|
| 430 |
-
if not validate_required_fields(decision_data, required_update_fields):
|
| 431 |
-
print(f"❌ Missing required fields for UPDATE_TRADE for {symbol}"); decision_data['action'] = 'HOLD'
|
| 432 |
-
elif not isinstance(decision_data['new_exit_parameters'], dict):
|
| 433 |
-
print(f"❌ 'new_exit_parameters' is not a valid dict for {symbol}"); decision_data['action'] = 'HOLD'
|
| 434 |
-
|
| 435 |
-
# (آلية الحماية من المسح)
|
| 436 |
-
if action in ['HOLD', 'UPDATE_TRADE']:
|
| 437 |
-
# التحقق من new_stop_loss
|
| 438 |
-
new_sl = decision_data.get('new_stop_loss')
|
| 439 |
-
if not isinstance(new_sl, (int, float)) or new_sl <= 0:
|
| 440 |
-
print(f"⚠️ LLM Re-Analysis {symbol}: new_stop_loss is invalid ({new_sl}). Reverting to original SL.")
|
| 441 |
-
decision_data['new_stop_loss'] = original_trade.get('stop_loss')
|
| 442 |
-
|
| 443 |
-
# التحقق من new_take_profit
|
| 444 |
-
new_tp = decision_data.get('new_take_profit')
|
| 445 |
-
if not isinstance(new_tp, (int, float)) or new_tp <= 0:
|
| 446 |
-
print(f"⚠️ LLM Re-Analysis {symbol}: new_take_profit is invalid ({new_tp}). Reverting to original TP.")
|
| 447 |
-
decision_data['new_take_profit'] = original_trade.get('take_profit')
|
| 448 |
-
|
| 449 |
-
strategy_value = decision_data.get('strategy')
|
| 450 |
-
if not strategy_value or strategy_value == 'unknown':
|
| 451 |
-
decision_data['strategy'] = fallback_strategy
|
| 452 |
-
|
| 453 |
-
return decision_data
|
| 454 |
-
except Exception as e:
|
| 455 |
-
print(f"Error parsing re-analysis response for {symbol}: {e}")
|
| 456 |
-
return None
|
| 457 |
-
# 🔴 --- END OF CHANGE --- 🔴
|
| 458 |
-
|
| 459 |
-
def _create_re_analysis_prompt(
|
| 460 |
-
self,
|
| 461 |
-
trade_data: dict, processed_data: dict, news_text: str,
|
| 462 |
-
pattern_analysis: dict, whale_data: dict,
|
| 463 |
-
statistical_feedback: str, active_context_playbook: str
|
| 464 |
-
) -> str:
|
| 465 |
|
| 466 |
-
symbol = trade_data.get('symbol', 'N/A')
|
| 467 |
|
| 468 |
-
#
|
| 469 |
-
|
| 470 |
-
|
| 471 |
-
|
| 472 |
-
|
| 473 |
-
|
| 474 |
-
|
| 475 |
-
|
| 476 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 477 |
|
| 478 |
-
|
| 479 |
-
|
|
|
|
| 480 |
|
| 481 |
-
|
|
|
|
|
|
|
|
|
|
| 482 |
|
| 483 |
-
|
| 484 |
-
try:
|
| 485 |
-
rsi_1d = processed_data.get('advanced_indicators', {}).get('1d', {}).get('rsi', 50)
|
| 486 |
-
if price_change_24h_raw > 40 and rsi_1d > 75:
|
| 487 |
-
exhaustion_warning = (
|
| 488 |
-
"🚩 **تنبيه استراتيجي: تم رصد زخم مرتفع (احتمال إرهاق)** 🚩\n"
|
| 489 |
-
f"الأصل مرتفع {price_change_24h_display} خلال 24 ساعة ومؤشر RSI على 1D/4H في منطقة تشبع شرائي.\n"
|
| 490 |
-
"هذا يزيد من خطورة الاستمرار في الصفقة. قم بالتحقيق في قوة الاتجاه الحالية.\n"
|
| 491 |
-
"هل بدأ الحجم (Volume) يضعف؟ هل تظهر إشارات انعكاس على 1H/4H؟\n"
|
| 492 |
-
"------------------------------------------------------------------"
|
| 493 |
-
)
|
| 494 |
-
except Exception:
|
| 495 |
-
pass
|
| 496 |
-
|
| 497 |
-
# 🔴 --- START OF CHANGE (V5.4) --- 🔴
|
| 498 |
-
prompt = f"""
|
| 499 |
-
TRADE RE-ANALYSIS FOR {symbol} (SPOT ONLY - Currently Open LONG Position)
|
| 500 |
-
|
| 501 |
-
{exhaustion_warning}
|
| 502 |
-
|
| 503 |
-
📊 CURRENT TRADE CONTEXT:
|
| 504 |
-
* Strategy: {strategy}
|
| 505 |
-
* Entry Price: {entry_price} (LONG position)
|
| 506 |
-
* Current Price: {current_price}
|
| 507 |
-
* 24H Price Change: {price_change_24h_display}
|
| 508 |
-
* Current Performance: {price_change_display}
|
| 509 |
-
* Current Exit Profile: {current_exit_profile}
|
| 510 |
-
* CURRENT Stop Loss: {current_sl}
|
| 511 |
-
* CURRENT Take Profit: {current_tp}
|
| 512 |
-
|
| 513 |
-
--- LEARNING HUB INPUT (CRITICAL) ---
|
| 514 |
-
{playbook_section}
|
| 515 |
-
{statistical_feedback_section}
|
| 516 |
-
--- END OF LEARNING INPUT ---
|
| 517 |
-
|
| 518 |
-
🔄 UPDATED TECHNICAL ANALYSIS:
|
| 519 |
-
{indicators_summary}
|
| 520 |
-
|
| 521 |
-
📈 UPDATED RAW CANDLE DATA SUMMARY & STATISTICS:
|
| 522 |
-
{candle_data_section}
|
| 523 |
-
{chr(10)}--- END OF CANDLE DATA ---{chr(10)}
|
| 524 |
-
|
| 525 |
-
🐋 UPDATED WHALE ACTIVITY:
|
| 526 |
-
{whale_analysis_section}
|
| 527 |
-
|
| 528 |
-
🌍 UPDATED MARKET CONTEXT:
|
| 529 |
-
{market_context_section if market_context_section and "No market context" not in market_context_section else "Market context data not available for this re-analysis."}
|
| 530 |
-
|
| 531 |
-
---
|
| 532 |
-
🎯 RE-ANALYSIS INSTRUCTIONS (SPOT - LONG POSITION):
|
| 533 |
-
|
| 534 |
-
1. **Analyze the Data:** Review all new data (Indicators, Candles, Whale, Market).
|
| 535 |
-
2. **Evaluate Current State:** Is the original reason for entry still valid? Has the risk changed?
|
| 536 |
-
3. **Investigate Alert (if present):** If the 'Exhaustion Alert' is active, determine if the trend is weakening (exit) or just consolidating (hold).
|
| 537 |
-
4. **Decide Action (HOLD, CLOSE_TRADE, or UPDATE_TRADE):**
|
| 538 |
-
* **HOLD:** The trade is still valid. The current strategy/targets are fine.
|
| 539 |
-
* **CLOSE_TRADE:** The trade is invalidated (e.g., trend reversal, risk too high).
|
| 540 |
-
* **UPDATE_TRADE:** The trade is valid, but the exit parameters need adjustment (e.g., raise stop loss to lock profit, or change exit profile).
|
| 541 |
-
|
| 542 |
-
**[CRITICAL OUTPUT RULES - YOU MUST FOLLOW THESE]:**
|
| 543 |
-
1. You **MUST** return one of three actions: `HOLD`, `CLOSE_TRADE`, or `UPDATE_TRADE`.
|
| 544 |
-
2. If `action` is `HOLD` or `UPDATE_TRADE`, you **MUST** provide valid (non-zero) numeric values for `new_stop_loss` and `new_take_profit`.
|
| 545 |
-
3. **If `action` is `HOLD` and you do not want to change the targets,** you **MUST** return the *CURRENT* values (Current SL: {current_sl}, Current TP: {current_tp}) in the `new_stop_loss` and `new_take_profit` fields.
|
| 546 |
-
4. If `action` is `CLOSE_TRADE`, the values for targets/exit profile are irrelevant.
|
| 547 |
-
|
| 548 |
-
OUTPUT FORMAT (JSON - SPOT RE-ANALYSIS):
|
| 549 |
-
{{
|
| 550 |
-
"action": "HOLD/CLOSE_TRADE/UPDATE_TRADE",
|
| 551 |
-
"reasoning": "Comprehensive justification. If 'HOLD' or 'UPDATE', justify the new (or existing) SL/TP values.",
|
| 552 |
-
"new_stop_loss": 0.000000,
|
| 553 |
-
"new_take_profit": 0.000000,
|
| 554 |
-
"new_exit_profile": "The new exit profile (or the existing one if HOLD)",
|
| 555 |
-
"new_exit_parameters": {{ "example_key": "example_value" }},
|
| 556 |
-
"new_expected_minutes": 15,
|
| 557 |
-
"confidence_level": 0.85,
|
| 558 |
-
"strategy": "{strategy}",
|
| 559 |
-
"self_critique": {{
|
| 560 |
-
"failure_modes": ["Primary risk of this new decision?", "Second risk?"],
|
| 561 |
-
"confidence_adjustment_reason": "Brief reason if confidence was adjusted."
|
| 562 |
-
}}
|
| 563 |
-
}}
|
| 564 |
-
"""
|
| 565 |
-
# 🔴 --- END OF CHANGE --- 🔴
|
| 566 |
-
return prompt
|
| 567 |
|
| 568 |
-
|
| 569 |
-
|
| 570 |
-
|
| 571 |
-
|
|
|
|
|
|
|
| 572 |
|
| 573 |
-
|
| 574 |
-
|
| 575 |
-
|
| 576 |
-
|
| 577 |
-
|
| 578 |
-
|
| 579 |
-
|
| 580 |
-
|
| 581 |
-
|
| 582 |
-
|
| 583 |
-
max_tokens=4000
|
| 584 |
-
)
|
| 585 |
-
|
| 586 |
-
content = None
|
| 587 |
-
if response.choices and response.choices[0].message:
|
| 588 |
-
content = response.choices[0].message.content
|
| 589 |
-
|
| 590 |
-
if content and '{' in content and '}' in content:
|
| 591 |
-
return content
|
| 592 |
-
else:
|
| 593 |
-
if content is None:
|
| 594 |
-
print(f"⚠️ LLM returned NO content (None) (attempt {attempt+1}). Check content filters or API status.")
|
| 595 |
-
else:
|
| 596 |
-
print(f"⚠️ LLM returned invalid content (not JSON) (attempt {attempt+1}): {content[:100]}...")
|
| 597 |
-
|
| 598 |
-
if attempt == 0: await asyncio.sleep(1)
|
| 599 |
-
|
| 600 |
-
except (RateLimitError, APITimeoutError) as e:
|
| 601 |
-
print(f"❌ LLM API Error (Rate Limit/Timeout): {e}. Retrying via backoff...")
|
| 602 |
-
raise
|
| 603 |
-
except Exception as e:
|
| 604 |
-
print(f"❌ Unexpected LLM API error (attempt {attempt+1}): {e}")
|
| 605 |
-
if attempt == 0: await asyncio.sleep(2)
|
| 606 |
-
elif attempt == 1: raise
|
| 607 |
-
|
| 608 |
-
print("❌ LLM failed to return valid content after retries.")
|
| 609 |
-
return ""
|
| 610 |
-
|
| 611 |
-
except Exception as e:
|
| 612 |
-
print(f"❌ Final failure in _call_llm after backoff retries: {e}")
|
| 613 |
-
raise
|
| 614 |
|
| 615 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LLM.py (V18.1 - 24h Accumulation Analysis)
|
| 2 |
+
import os, traceback, json, time, re
|
| 3 |
+
import httpx
|
| 4 |
from datetime import datetime
|
| 5 |
+
from typing import List, Dict, Any, Optional
|
| 6 |
+
|
| 7 |
+
try:
|
| 8 |
+
from r2 import R2Service
|
| 9 |
+
from learning_hub.hub_manager import LearningHubManager # (استيراد العقل)
|
| 10 |
+
except ImportError:
|
| 11 |
+
print("❌ [LLMService] فشل استيراد R2Service أو LearningHubManager")
|
| 12 |
+
R2Service = None
|
| 13 |
+
LearningHubManager = None
|
| 14 |
+
|
| 15 |
+
# (V8.1) استيراد NewsFetcher
|
| 16 |
+
try:
|
| 17 |
+
from sentiment_news import NewsFetcher
|
| 18 |
+
except ImportError:
|
| 19 |
+
NewsFetcher = None
|
| 20 |
+
|
| 21 |
+
# إعدادات النموذج
|
| 22 |
+
LLM_API_URL = os.getenv("LLM_API_URL", "https://api.groq.com/openai/v1/chat/completions")
|
| 23 |
+
LLM_API_KEY = os.getenv("LLM_API_KEY")
|
| 24 |
+
LLM_MODEL = os.getenv("LLM_MODEL", "llama3-70b-8192")
|
| 25 |
+
|
| 26 |
+
# إعدادات العميل
|
| 27 |
+
# (زيادة المهلة إلى 5 دقائق للتحليلات المعقدة)
|
| 28 |
+
CLIENT_TIMEOUT = 300.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
class LLMService:
|
| 31 |
+
def __init__(self):
|
| 32 |
+
if not LLM_API_KEY:
|
| 33 |
+
raise ValueError("❌ [LLMService] متغير بيئة LLM_API_KEY غير موجود!")
|
| 34 |
+
|
| 35 |
+
self.headers = {
|
| 36 |
+
"Authorization": f"Bearer {LLM_API_KEY}",
|
| 37 |
+
"Content-Type": "application/json"
|
| 38 |
+
}
|
| 39 |
+
self.http_client = httpx.AsyncClient(timeout=CLIENT_TIMEOUT)
|
| 40 |
+
|
| 41 |
+
# --- (الربط بالخدمات الأخرى) ---
|
| 42 |
+
self.r2_service: Optional[R2Service] = None
|
| 43 |
+
self.learning_hub: Optional[LearningHubManager] = None
|
| 44 |
+
|
| 45 |
+
# (V8.1) إضافة NewsFetcher (للاستخدام في إعادة التحليل)
|
| 46 |
+
self.news_fetcher: Optional[NewsFetcher] = None
|
| 47 |
+
|
| 48 |
+
print(f"✅ [LLMService] مهيأ. النموذج: {LLM_MODEL}")
|
| 49 |
+
|
| 50 |
+
async def _call_llm(self, prompt: str, temperature: float = 0.2, max_tokens: int = 1500) -> Optional[str]:
|
| 51 |
+
"""
|
| 52 |
+
(محدث) إجراء استدعاء API للنموذج الضخم (باللغة الإنجليزية).
|
| 53 |
+
"""
|
| 54 |
+
payload = {
|
| 55 |
+
"model": LLM_MODEL,
|
| 56 |
+
"messages": [
|
| 57 |
+
{"role": "system", "content": "You are an expert AI trading analyst. You MUST respond ONLY with the requested JSON object, with no introductory text, markdown formatting (like ```json), or explanations."},
|
| 58 |
+
{"role": "user", "content": prompt}
|
| 59 |
+
],
|
| 60 |
+
"temperature": temperature,
|
| 61 |
+
"max_tokens": max_tokens,
|
| 62 |
+
"top_p": 1,
|
| 63 |
+
"stream": False,
|
| 64 |
+
"response_format": {"type": "json_object"}
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
try:
|
| 68 |
+
response = await self.http_client.post(LLM_API_URL, headers=self.headers, json=payload)
|
| 69 |
+
response.raise_for_status() # Check for HTTP 4xx/5xx errors
|
| 70 |
+
|
| 71 |
+
data = response.json()
|
| 72 |
+
|
| 73 |
+
if "choices" in data and len(data["choices"]) > 0:
|
| 74 |
+
content = data["choices"][0].get("message", {}).get("content")
|
| 75 |
+
if content:
|
| 76 |
+
return content.strip()
|
| 77 |
+
|
| 78 |
+
print(f"❌ [LLMService] استجابة API غي�� متوقعة: {data}")
|
| 79 |
+
return None
|
| 80 |
+
|
| 81 |
+
except httpx.HTTPStatusError as http_err:
|
| 82 |
+
print(f"❌ [LLMService] خطأ HTTP: {http_err} - {http_err.response.text}")
|
| 83 |
+
except httpx.RequestError as req_err:
|
| 84 |
+
print(f"❌ [LLMService] خطأ في الطلب: {req_err}")
|
| 85 |
+
except json.JSONDecodeError:
|
| 86 |
+
print(f"❌ [LLMService] فشل في تحليل استجابة JSON: {response.text}")
|
| 87 |
+
except Exception as e:
|
| 88 |
+
print(f"❌ [LLMService] خطأ غير متوقع في _call_llm: {e}")
|
| 89 |
+
traceback.print_exc()
|
| 90 |
+
|
| 91 |
+
return None
|
| 92 |
+
|
| 93 |
+
def _parse_llm_response_enhanced(self,
|
| 94 |
+
response_text: str,
|
| 95 |
+
fallback_strategy: str = "decision",
|
| 96 |
+
symbol: str = "N/A") -> Optional[Dict[str, Any]]:
|
| 97 |
+
"""
|
| 98 |
+
(محدث V8) محلل JSON ذكي ومتسامح مع الأخطاء.
|
| 99 |
+
"""
|
| 100 |
+
if not response_text:
|
| 101 |
+
print(f" ⚠️ [LLMParser] الاستجابة فارغة لـ {symbol}.")
|
| 102 |
+
return self._get_fallback_response(fallback_strategy, "Empty response")
|
| 103 |
+
|
| 104 |
+
# 1. محاولة تحليل JSON مباشرة (لأننا طلبنا response_format=json_object)
|
| 105 |
+
try:
|
| 106 |
+
return json.loads(response_text)
|
| 107 |
+
except json.JSONDecodeError:
|
| 108 |
+
print(f" ⚠️ [LLMParser] فشل تحليل JSON المباشر لـ {symbol}. محاولة استخراج JSON...")
|
| 109 |
+
pass # (الانتقال إلى المحاولة 2)
|
| 110 |
|
| 111 |
+
# 2. محاولة استخراج JSON من داخل نص (Fallback 1)
|
| 112 |
+
try:
|
| 113 |
+
# (البحث عن أول { وآخر })
|
| 114 |
+
json_match = re.search(r'\{.*\}', response_text, re.DOTALL)
|
| 115 |
+
if json_match:
|
| 116 |
+
json_string = json_match.group(0)
|
| 117 |
+
return json.loads(json_string)
|
| 118 |
+
else:
|
| 119 |
+
print(f" ⚠️ [LLMParser] لم يتم العثور على JSON مطابق لـ {symbol}.")
|
| 120 |
+
raise json.JSONDecodeError("No JSON object found in text", response_text, 0)
|
| 121 |
+
except json.JSONDecodeError as e:
|
| 122 |
+
print(f" ❌ [LLMParser] فشل الاستخراج النهائي لـ {symbol}. نص الاستجابة: {response_text[:200]}...")
|
| 123 |
+
return self._get_fallback_response(fallback_strategy, f"Final JSON parse fail: {e}")
|
| 124 |
+
except Exception as e:
|
| 125 |
+
print(f" ❌ [LLMParser] خطأ غير متوقع في المحلل لـ {symbol}: {e}")
|
| 126 |
+
return self._get_fallback_response(fallback_strategy, f"Unexpected parser error: {e}")
|
| 127 |
+
|
| 128 |
+
def _get_fallback_response(self, strategy: str, reason: str) -> Optional[Dict[str, Any]]:
|
| 129 |
+
"""
|
| 130 |
+
(محدث V8) إرجاع استجابة آمنة عند فشل النموذج الضخم.
|
| 131 |
+
"""
|
| 132 |
+
print(f" 🚨 [LLMService] تفعيل الاستجابة الاحتياطية (Fallback) لاستراتيجية '{strategy}' (السبب: {reason})")
|
| 133 |
+
|
| 134 |
+
if strategy == "decision":
|
| 135 |
+
# (القرار الآمن: لا تتداول)
|
| 136 |
+
return {
|
| 137 |
+
"action": "NO_DECISION",
|
| 138 |
+
"strategy_to_watch": "GENERIC",
|
| 139 |
+
"confidence_level": 0,
|
| 140 |
+
"reasoning": f"LLM analysis failed: {reason}",
|
| 141 |
+
"exit_profile": "Standard"
|
| 142 |
+
}
|
| 143 |
+
elif strategy == "reanalysis":
|
| 144 |
+
# (القرار الآمن: استمر في الصفقة الحالية)
|
| 145 |
+
return {
|
| 146 |
+
"action": "HOLD",
|
| 147 |
+
"strategy": "MAINTAIN_CURRENT",
|
| 148 |
+
"reasoning": f"LLM re-analysis failed: {reason}. Maintaining current trade strategy."
|
| 149 |
+
}
|
| 150 |
+
elif strategy == "reflection":
|
| 151 |
+
# (القرار الآمن: لا تقم بإنشاء قاعدة تعلم)
|
| 152 |
+
return None # (سيمنع Reflector من إنشاء دلتا)
|
| 153 |
+
|
| 154 |
+
elif strategy == "distillation":
|
| 155 |
+
# (القرار الآمن: لا تقم بإنشاء قواعد مقطرة)
|
| 156 |
+
return None # (سيمنع Curator من المتابعة)
|
| 157 |
|
| 158 |
+
return None # (Fallback عام)
|
| 159 |
+
|
| 160 |
+
async def get_trading_decision(self, candidate_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
| 161 |
+
"""
|
| 162 |
+
(محدث V8.1)
|
| 163 |
+
يستدعي النموذج الضخم لاتخاذ قرار "WATCH" استراتيجي (Explorer Brain).
|
| 164 |
+
"""
|
| 165 |
+
symbol = candidate_data.get('symbol', 'UNKNOWN')
|
| 166 |
+
try:
|
| 167 |
+
# 1. (العقل) جلب القواعد (Deltas) من محور التعلم
|
| 168 |
+
# (سيتم جلب القواعد العامة + قواعد الاستراتيجية بناءً على المرشح)
|
| 169 |
+
learning_context_prompt = "Playbook: No learning context available."
|
| 170 |
+
if self.learning_hub:
|
| 171 |
+
# (استعلام عام لجلب أفضل القواعد الشاملة)
|
| 172 |
+
learning_context_prompt = await self.learning_hub.get_active_context_for_llm(
|
| 173 |
+
domain="general",
|
| 174 |
+
query=f"{symbol} strategy decision"
|
| 175 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 176 |
|
| 177 |
+
# 2. إنشاء الـ Prompt (باللغة الإنجليزية)
|
| 178 |
+
prompt = self._create_trading_prompt(candidate_data, learning_context_prompt)
|
| 179 |
+
|
| 180 |
+
# (اختياري: حفظ الـ Prompt للتدقيق)
|
| 181 |
if self.r2_service:
|
| 182 |
+
await self.r2_service.save_llm_prompts_async(symbol, "trading_decision", prompt, candidate_data)
|
|
|
|
|
|
|
|
|
|
| 183 |
|
| 184 |
+
# 3. استدعاء النموذج الضخم (LLM)
|
| 185 |
+
response_text = await self._call_llm(prompt, temperature=0.1, max_tokens=1000)
|
| 186 |
|
| 187 |
+
# 4. تحليل الاستجابة (باستخدام المحلل الذكي)
|
| 188 |
+
decision_json = self._parse_llm_response_enhanced(
|
| 189 |
+
response_text,
|
| 190 |
+
fallback_strategy="decision",
|
| 191 |
+
symbol=symbol
|
| 192 |
+
)
|
| 193 |
|
| 194 |
+
return decision_json
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 195 |
|
| 196 |
except Exception as e:
|
| 197 |
+
print(f"❌ [LLMService] فشل فادح في get_trading_decision لـ {symbol}: {e}")
|
| 198 |
+
traceback.print_exc()
|
| 199 |
+
return self._get_fallback_response("decision", str(e)) # (إرجاع قرار آمن)
|
| 200 |
+
|
| 201 |
+
async def re_analyze_trade_async(self, trade_data: Dict[str, Any], current_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
| 202 |
+
"""
|
| 203 |
+
(محدث V8.1)
|
| 204 |
+
يستدعي النموذج الضخم لإعادة تحليل صفقة مفتوحة (Reflector Brain).
|
| 205 |
+
"""
|
| 206 |
+
symbol = trade_data.get('symbol', 'UNKNOWN')
|
| 207 |
try:
|
| 208 |
+
# 1. (العقل) جلب القواعد (Deltas) من محور التعلم
|
| 209 |
+
learning_context_prompt = "Playbook: No learning context available."
|
| 210 |
+
if self.learning_hub:
|
| 211 |
+
# (استعلام محدد لإعادة التحليل)
|
| 212 |
+
learning_context_prompt = await self.learning_hub.get_active_context_for_llm(
|
| 213 |
+
domain="strategy",
|
| 214 |
+
query=f"{symbol} re-analysis {trade_data.get('strategy', 'GENERIC')}"
|
| 215 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 216 |
|
| 217 |
+
# 2. (V8.1) جلب أحدث الأخبار (باستخدام NewsFetcher المخصص)
|
| 218 |
+
latest_news_text = "News data unavailable for re-analysis."
|
| 219 |
+
latest_news_score = 0.0
|
| 220 |
+
if self.news_fetcher:
|
| 221 |
+
# (هذا يجلب أحدث الأخبار في الوقت الفعلي)
|
| 222 |
+
latest_news_text = await self.news_fetcher.get_news_for_symbol(symbol)
|
| 223 |
+
if self.news_fetcher.vader_analyzer and latest_news_text:
|
| 224 |
+
vader_scores = self.news_fetcher.vader_analyzer.polarity_scores(latest_news_text)
|
| 225 |
+
latest_news_score = vader_scores.get('compound', 0.0)
|
|
|
|
|
|
|
|
|
|
| 226 |
|
| 227 |
+
# (إضافة الأخبار المحدثة إلى البيانات الحالية)
|
| 228 |
+
current_data['latest_news_text'] = latest_news_text
|
| 229 |
+
current_data['latest_news_score'] = latest_news_score
|
| 230 |
+
|
| 231 |
+
# 3. إنشاء الـ Prompt (باللغة الإنجليزية)
|
| 232 |
+
prompt = self._create_reanalysis_prompt(trade_data, current_data, learning_context_prompt)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 233 |
|
| 234 |
+
# (اختياري: حفظ الـ Prompt للتدقيق)
|
| 235 |
+
if self.r2_service:
|
| 236 |
+
await self.r2_service.save_llm_prompts_async(symbol, "trade_reanalysis", prompt, current_data)
|
|
|
|
| 237 |
|
| 238 |
+
# 4. استدعاء النموذج الضخم (LLM)
|
| 239 |
+
response_text = await self._call_llm(prompt, temperature=0.0, max_tokens=1000) # (درجة حرارة 0.0 للحسم)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 240 |
|
| 241 |
+
# 5. تحليل الاستجابة (باستخدام المحلل الذكي)
|
| 242 |
+
decision_json = self._parse_llm_response_enhanced(
|
| 243 |
+
response_text,
|
| 244 |
+
fallback_strategy="reanalysis",
|
| 245 |
+
symbol=symbol
|
| 246 |
+
)
|
| 247 |
+
|
| 248 |
+
return decision_json
|
| 249 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 250 |
except Exception as e:
|
| 251 |
+
print(f"❌ [LLMService] فشل فادح في re_analyze_trade_async لـ {symbol}: {e}")
|
| 252 |
+
traceback.print_exc()
|
| 253 |
+
return self._get_fallback_response("reanalysis", str(e)) # (إرجاع قرار آمن)
|
| 254 |
+
|
| 255 |
+
    # --- (Prompt-building helpers) ---
    # (NOTE: these prompts must always be written in English)
|
| 257 |
+
|
| 258 |
+
    def _create_trading_prompt(self,
                               candidate_data: Dict[str, Any],
                               learning_context: str) -> str:
        """(V18.1) Build the English prompt for the initial trading decision (Explorer brain).

        Args:
            candidate_data: Aggregated Layer-0/Layer-1 analysis for one symbol
                (ML scores, news/sentiment, whale flows, market context).
                Missing keys fall back to neutral defaults throughout.
            learning_context: Pre-formatted "playbook" text of learned rules.

        Returns:
            The complete prompt string to send to the LLM.
        """

        symbol = candidate_data.get('symbol', 'N/A')

        # --- 1. ML data (Layer 1) ---
        l1_score = candidate_data.get('enhanced_final_score', 0)
        l1_reasons = candidate_data.get('reasons_for_candidacy', [])
        pattern_data = candidate_data.get('pattern_analysis', {})
        mc_data = candidate_data.get('monte_carlo_distribution', {})

        # --- 2. News & sentiment data (Layer 1) ---
        # (V8.2) Raw VADER score plus the statistically learned PnL score.
        news_text = candidate_data.get('news_text', 'No news text provided.')
        news_score_raw = candidate_data.get('news_score_raw', 0.0)  # raw VADER score
        statistical_news_pnl = candidate_data.get('statistical_news_pnl', 0.0)  # learned statistical score

        # --- 3. Whale data (Layer 1) ---
        # (NOTE: this is the section updated in V18.1.)
        whale_data = candidate_data.get('whale_data', {})
        whale_summary = whale_data.get('llm_friendly_summary', {})
        exchange_flows = whale_data.get('exchange_flows', {})

        whale_signal = whale_summary.get('recommended_action', 'HOLD')
        whale_confidence = whale_summary.get('confidence', 0.3)
        whale_reason = whale_summary.get('whale_activity_summary', 'No whale data.')

        # Short-term flow — presumably from the best learned window (e.g. 1h);
        # TODO confirm against the whale-data producer.
        net_flow_usd = exchange_flows.get('net_flow_usd', 0.0)

        # --- START OF CHANGE (V18.1) ---
        # Long-term flow — from the new 24h accumulation analysis.
        accumulation_data_24h = whale_data.get('accumulation_analysis_24h', {})
        net_flow_24h_usd = accumulation_data_24h.get('net_flow_usd', 0.0)
        total_inflow_24h_usd = accumulation_data_24h.get('to_exchanges_usd', 0.0)
        total_outflow_24h_usd = accumulation_data_24h.get('from_exchanges_usd', 0.0)
        relative_net_flow_24h_percent = accumulation_data_24h.get('relative_net_flow_percent', 0.0)
        # --- END OF CHANGE ---

        # --- 4. Market data (Layer 0) ---
        market_context = candidate_data.get('sentiment_data', {})
        market_trend = market_context.get('market_trend', 'UNKNOWN')
        btc_sentiment = market_context.get('btc_sentiment', 'UNKNOWN')

        # --- 5. Prompt sections (English) ---

        # (learning playbook)
        playbook_prompt = f"""
--- START OF LEARNING PLAYBOOK ---
{learning_context}
--- END OF PLAYBOOK ---
"""
        # (ML summary)
        ml_summary_prompt = f"""
1. **ML Analysis (Score: {l1_score:.3f}):**
    * Reasons: {', '.join(l1_reasons)}
    * Chart Pattern: {pattern_data.get('pattern_detected', 'None')} (Conf: {pattern_data.get('pattern_confidence', 0):.2f})
    * Monte Carlo (1h): {mc_data.get('probability_of_gain', 0):.1f}% chance of profit (Expected: {mc_data.get('expected_return_pct', 0):.2f}%)
"""
        # (news summary)
        news_prompt = f"""
2. **News & Sentiment Analysis:**
    * Market Trend: {market_trend} (BTC: {btc_sentiment})
    * VADER (Raw): {news_score_raw:.3f}
    * Statistical PnL (Learned): {statistical_news_pnl:+.2f}%
    * News Text: {news_text[:300]}...
"""
        # (whale summary — updated in V18.1 with the 24h accumulation block)
        whale_activity_prompt = f"""
3. **Whale Activity (Real-time Flow - Optimized Window):**
    * Signal: {whale_signal} (Confidence: {whale_confidence:.2f})
    * Reason: {whale_reason}
    * Net Flow (to/from Exchanges): ${net_flow_usd:,.2f}

4. **Whale Activity (24h Accumulation):**
    * 24h Net Flow (Accumulation): ${net_flow_24h_usd:,.2f}
    * 24h Total Deposits: ${total_inflow_24h_usd:,.2f}
    * 24h Total Withdrawals: ${total_outflow_24h_usd:,.2f}
    * Relative 24h Net Flow (vs Daily Volume): {relative_net_flow_24h_percent:+.2f}%
"""

        # Assemble: system role, playbook, data sections, then the task/output
        # contract. The TASK/OUTPUT block is a plain (non-f) string so its
        # literal { } braces are emitted verbatim.
        prompt_sections = [
            f"SYSTEM: You are an expert AI trading analyst (Explorer Brain). Analyze the provided data for {symbol} and decide if it's a high-potential candidate to 'WATCH'.",
            playbook_prompt,
            "--- START OF CANDIDATE DATA ---",
            ml_summary_prompt,
            news_prompt,
            whale_activity_prompt,
            "--- END OF CANDIDATE DATA ---",
            """
TASK:
1. Synthesize all data points (ML, News, Whale Flow, 24h Accumulation).
2. Consult the "Playbook" for learned rules.
3. Decide the final action: 'WATCH' (if high confidence) or 'NO_DECISION' (if low confidence or conflicting signals).
4. If 'WATCH', define the 'strategy_to_watch' (e.g., 'Breakout', 'MeanReversion', 'WhaleAccumulation') and an appropriate 'exit_profile' (e.g., 'Aggressive', 'Standard', 'Patient').

OUTPUT (JSON Object ONLY):
{
    "action": "WATCH" or "NO_DECISION",
    "strategy_to_watch": "STRATEGY_NAME",
    "confidence_level": 0.0_to_1.0,
    "reasoning": "Brief justification (max 40 words) synthesizing all data points.",
    "exit_profile": "Aggressive" or "Standard" or "Patient"
}
"""
        ]

        return "\n".join(prompt_sections)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 371 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 372 |
|
| 373 |
+
def _create_reanalysis_prompt(self,
|
| 374 |
+
trade_data: Dict[str, Any],
|
| 375 |
+
current_data: Dict[str, Any],
|
| 376 |
+
learning_context: str) -> str:
|
| 377 |
+
"""
|
| 378 |
+
(معدل V8.1)
|
| 379 |
+
إنشاء الـ Prompt (باللغة الإنجليزية) لإعادة تحليل صفقة مفتوحة (Reflector Brain).
|
| 380 |
+
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 381 |
|
| 382 |
+
symbol = trade_data.get('symbol', 'N/A')
|
| 383 |
|
| 384 |
+
# --- 1. بيانات الصفقة الأصلية (القديمة) ---
|
| 385 |
+
original_strategy = trade_data.get('strategy', 'N/A')
|
| 386 |
+
original_reasoning = trade_data.get('decision_data', {}).get('reasoning', 'N/A')
|
| 387 |
+
entry_price = trade_data.get('entry_price', 0)
|
| 388 |
+
current_pnl = trade_data.get('pnl_percent', 0)
|
| 389 |
+
current_sl = trade_data.get('stop_loss', 0)
|
| 390 |
+
current_tp = trade_data.get('take_profit', 0)
|
| 391 |
+
|
| 392 |
+
# --- 2. البيانات الفنية المحدثة (الحالية) ---
|
| 393 |
+
current_price = current_data.get('current_price', 0)
|
| 394 |
+
mc_data = current_data.get('monte_carlo_distribution', {})
|
| 395 |
+
mc_prob = mc_data.get('probability_of_gain', 0)
|
| 396 |
+
mc_expected_return = mc_data.get('expected_return_pct', 0)
|
| 397 |
|
| 398 |
+
# --- 3. (V8.1) بيانات الأخبار المحدثة (الحالية) ---
|
| 399 |
+
latest_news_text = current_data.get('latest_news_text', 'No news.')
|
| 400 |
+
latest_news_score = current_data.get('latest_news_score', 0.0)
|
| 401 |
|
| 402 |
+
# --- 4. (العقل) بيانات التعلم الإحصائي ---
|
| 403 |
+
statistical_feedback = ""
|
| 404 |
+
if self.learning_hub:
|
| 405 |
+
statistical_feedback = await self.learning_hub.get_statistical_feedback_for_llm(original_strategy)
|
| 406 |
|
| 407 |
+
# --- 5. بناء أقسام الـ Prompt (الإنجليزية) ---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 408 |
|
| 409 |
+
playbook_prompt = f"""
|
| 410 |
+
--- START OF LEARNING PLAYBOOK ---
|
| 411 |
+
{learning_context}
|
| 412 |
+
{statistical_feedback}
|
| 413 |
+
--- END OF PLAYBOOK ---
|
| 414 |
+
"""
|
| 415 |
|
| 416 |
+
trade_status_prompt = f"""
|
| 417 |
+
1. **Open Trade Status ({symbol}):**
|
| 418 |
+
* Current PnL: {current_pnl:+.2f}%
|
| 419 |
+
* Original Strategy: {original_strategy}
|
| 420 |
+
* Original Reasoning: {original_reasoning}
|
| 421 |
+
* Entry Price: {entry_price}
|
| 422 |
+
* Current Price: {current_price}
|
| 423 |
+
* Current StopLoss: {current_sl}
|
| 424 |
+
* Current TakeProfit: {current_tp}
|
| 425 |
+
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 426 |
|
| 427 |
+
current_analysis_prompt = f"""
|
| 428 |
+
2. **Current Real-time Analysis:**
|
| 429 |
+
* Monte Carlo (1h): {mc_prob:.1f}% chance of profit (Expected: {mc_expected_return:.2f}%)
|
| 430 |
+
* Latest News (VADER: {latest_news_score:.3f}): {latest_news_text[:300]}...
|
| 431 |
+
"""
|
| 432 |
+
|
| 433 |
+
prompt_sections = [
|
| 434 |
+
f"SYSTEM: You are an expert AI trading analyst (Reflector Brain). An open trade for {symbol} has triggered a mandatory re-analysis. Analyze the new data and decide the next action.",
|
| 435 |
+
playbook_prompt,
|
| 436 |
+
"--- START OF TRADE DATA ---",
|
| 437 |
+
trade_status_prompt,
|
| 438 |
+
current_analysis_prompt,
|
| 439 |
+
"--- END OF TRADE DATA ---",
|
| 440 |
+
"""
|
| 441 |
+
TASK:
|
| 442 |
+
1. Analyze the "Open Trade Status" against the "Current Real-time Analysis".
|
| 443 |
+
2. Consult the "Playbook" for learned rules and statistical feedback.
|
| 444 |
+
3. Decide the best course of action:
|
| 445 |
+
* 'HOLD': Maintain the current strategy (if analysis is still valid or neutral).
|
| 446 |
+
* 'UPDATE_TRADE': Adjust StopLoss/TakeProfit (if risk/reward changed).
|
| 447 |
+
* 'CLOSE_TRADE': Immediately close the trade (if analysis is invalidated or high risk).
|
| 448 |
+
4. You MUST provide new SL/TP targets if action is 'UPDATE_TRADE'.
|
| 449 |
+
|
| 450 |
+
OUTPUT (JSON Object ONLY):
|
| 451 |
+
{
|
| 452 |
+
"action": "HOLD" or "UPDATE_TRADE" or "CLOSE_TRADE",
|
| 453 |
+
"strategy": "MAINTAIN_CURRENT" or "ADAPTIVE_EXIT" or "IMMEDIATE_EXIT",
|
| 454 |
+
"reasoning": "Brief justification (max 40 words) for the decision.",
|
| 455 |
+
"new_stop_loss": (float or null, required if action is 'UPDATE_TRADE'),
|
| 456 |
+
"new_take_profit": (float or null, required if action is 'UPDATE_TRADE')
|
| 457 |
+
}
|
| 458 |
+
"""
|
| 459 |
+
]
|
| 460 |
+
|
| 461 |
+
return "\n".join(prompt_sections)
|