Riy777 committed
Commit 99c948e · 1 Parent(s): d10d6ba

Update LLM.py

Files changed (1)
  1. LLM.py +422 -576
LLM.py CHANGED
@@ -1,615 +1,461 @@
1
- # LLM.py (Updated to V5.4 - Fixed Re-Analysis TP/SL Wipe Bug)
2
- import os, traceback, asyncio, json, time
3
- import re
4
  from datetime import datetime
5
- from functools import wraps
6
- from backoff import on_exception, expo
7
- from openai import OpenAI, RateLimitError, APITimeoutError
8
- import numpy as np
9
- from sentiment_news import NewsFetcher
10
- from helpers import validate_required_fields, format_technical_indicators, format_strategy_scores, format_candle_data_for_pattern_analysis, format_whale_analysis_for_llm, parse_json_from_response
11
- from ml_engine.processor import safe_json_parse
12
-
13
- NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY")
14
- PRIMARY_MODEL = "nvidia/llama-3.1-nemotron-ultra-253b-v1"
15
-
16
- # (PatternAnalysisEngine - no changes)
17
- class PatternAnalysisEngine:
18
- def __init__(self, llm_service):
19
- self.llm = llm_service
20
- def _format_chart_data_for_llm(self, ohlcv_data):
21
- if not ohlcv_data: return "Insufficient chart data for pattern analysis"
22
- try:
23
- all_timeframes = []
24
- for timeframe, candles in ohlcv_data.items():
25
- if candles and len(candles) >= 10:
26
- raw_candle_summary = self._format_raw_candle_data(candles, timeframe)
27
- all_timeframes.append(f"=== {timeframe.upper()} TIMEFRAME ({len(candles)} CANDLES) ===\n{raw_candle_summary}")
28
- return "\n\n".join(all_timeframes) if all_timeframes else "No sufficient timeframe data available"
29
- except Exception as e: return f"Error formatting chart data: {str(e)}"
30
- def _format_raw_candle_data(self, candles, timeframe):
31
- try:
32
- if len(candles) < 10: return f"Only {len(candles)} candles available - insufficient for deep pattern analysis"
33
- analysis_candles = candles[-50:] if len(candles) > 50 else candles
34
- summary = []; summary.append(f"Total candles: {len(candles)} (showing last {len(analysis_candles)})"); summary.append("Recent candles (newest to oldest):")
35
- for i in range(min(15, len(analysis_candles))):
36
- idx = len(analysis_candles) - 1 - i; candle = analysis_candles[idx]
37
- try: timestamp = datetime.fromtimestamp(candle[0] / 1000).strftime('%Y-%m-%d %H:%M:%S')
38
- except: timestamp = "unknown"
39
- open_price, high, low, close, volume = candle[1], candle[2], candle[3], candle[4], candle[5]
40
- candle_type = "🟢 BULLISH" if close > open_price else "🔴 BEARISH" if close < open_price else "⚪ NEUTRAL"
41
- body_size = abs(close - open_price); body_percent = (body_size / open_price * 100) if open_price > 0 else 0
42
- wick_upper = high - max(open_price, close); wick_lower = min(open_price, close) - low; total_range = high - low
43
- if total_range > 0: body_ratio = (body_size / total_range) * 100; upper_wick_ratio = (wick_upper / total_range) * 100; lower_wick_ratio = (wick_lower / total_range) * 100
44
- else: body_ratio = upper_wick_ratio = lower_wick_ratio = 0
45
- summary.append(f"{i+1:2d}. {timestamp} | {candle_type}"); summary.append(f" O:{open_price:.8f} H:{high:.8f} L:{low:.8f} C:{close:.8f}"); summary.append(f" Body: {body_percent:.2f}% | Body/Range: {body_ratio:.1f}%"); summary.append(f" Wicks: Upper {upper_wick_ratio:.1f}% / Lower {lower_wick_ratio:.1f}%"); summary.append(f" Volume: {volume:,.0f}")
46
- if len(analysis_candles) >= 20:
47
- stats = self._calculate_candle_statistics(analysis_candles)
48
- summary.append(f"\n📊 STATISTICAL ANALYSIS:"); summary.append(f"• Price Change: {stats['price_change']:+.2f}%"); summary.append(f"• Average Body Size: {stats['avg_body']:.4f}%"); summary.append(f"• Volatility (ATR): {stats['atr']:.6f}"); summary.append(f"• Trend: {stats['trend']}"); summary.append(f"• Support: {stats['support']:.6f}"); summary.append(f"• Resistance: {stats['resistance']:.6f}")
49
- return "\n".join(summary)
50
- except Exception as e: return f"Error formatting raw candle data: {str(e)}"
51
- def _calculate_candle_statistics(self, candles):
52
- try:
53
- closes = [c[4] for c in candles]; opens = [c[1] for c in candles]; highs = [c[2] for c in candles]; lows = [c[3] for c in candles]
54
- first_close = closes[0]; last_close = closes[-1]; price_change = ((last_close - first_close) / first_close) * 100
55
- body_sizes = [abs(close - open) for open, close in zip(opens, closes)]; avg_body = (sum(body_sizes) / len(body_sizes)) / first_close * 100 if first_close > 0 else 0
56
- true_ranges = [];
57
- for i in range(1, len(candles)): high, low, prev_close = highs[i], lows[i], closes[i-1]; tr1 = high - low; tr2 = abs(high - prev_close); tr3 = abs(low - prev_close); true_ranges.append(max(tr1, tr2, tr3))
58
- atr = sum(true_ranges) / len(true_ranges) if true_ranges else 0
59
- if price_change > 3: trend = "STRONG UPTREND"
60
- elif price_change > 1: trend = "UPTREND"
61
- elif price_change < -3: trend = "STRONG DOWNTREND"
62
- elif price_change < -1: trend = "DOWNTREND"
63
- else: trend = "SIDEWAYS"
64
- support = min(lows); resistance = max(highs)
65
- return {'price_change': price_change, 'avg_body': avg_body, 'atr': atr, 'trend': trend, 'support': support, 'resistance': resistance}
66
- except Exception as e: return {'price_change': 0, 'avg_body': 0, 'atr': 0, 'trend': 'UNKNOWN', 'support': 0, 'resistance': 0}
67
- async def analyze_chart_patterns(self, symbol, ohlcv_data): pass
68
- def _parse_pattern_response(self, response_text): pass
69
-
70
 
71
  class LLMService:
72
- def __init__(self, api_key=NVIDIA_API_KEY, model_name=PRIMARY_MODEL, temperature=0.7):
73
- self.api_key = api_key
74
- self.model_name = model_name
75
- self.temperature = temperature
76
- self.client = OpenAI(base_url="https://integrate.api.nvidia.com/v1", api_key=self.api_key)
77
- self.news_fetcher = NewsFetcher()
78
- self.pattern_engine = PatternAnalysisEngine(self)
79
- self.semaphore = asyncio.Semaphore(5)
80
- self.r2_service = None
81
- self.learning_hub = None
82
-
83
- def _rate_limit_nvidia_api(func):
84
- @wraps(func)
85
- @on_exception(expo, RateLimitError, max_tries=5)
86
- async def wrapper(*args, **kwargs):
87
- return await func(*args, **kwargs)
88
- return wrapper
89
-
90
- async def get_trading_decision(self, data_payload: dict):
91
  try:
92
- symbol = data_payload.get('symbol', 'unknown')
93
- target_strategy = data_payload.get('target_strategy', 'GENERIC')
94
-
95
- ohlcv_data = data_payload.get('raw_ohlcv') or data_payload.get('ohlcv')
96
- if not ohlcv_data: return None
97
- total_candles = sum(len(data) for data in ohlcv_data.values() if data) if ohlcv_data else 0
98
- if total_candles < 30: return None
99
-
100
- news_text = await self.news_fetcher.get_news_for_symbol(symbol)
101
- whale_data = data_payload.get('whale_data', {})
102
 
103
- statistical_feedback = "No statistical learning data yet."
104
- active_context_playbook = "No active learning rules available."
105
 
106
- if self.learning_hub and self.learning_hub.initialized:
107
- statistical_feedback = await self.learning_hub.get_statistical_feedback_for_llm(target_strategy)
108
- active_context_playbook = await self.learning_hub.get_active_context_for_llm(
109
- domain="strategy", query=f"{target_strategy} {symbol}"
110
  )
111
-
112
- prompt = self._create_comprehensive_sentry_prompt(
113
- data_payload, news_text, None, whale_data,
114
- statistical_feedback, active_context_playbook
115
- )
116
117
  if self.r2_service:
118
- analysis_data = { 'symbol': symbol, 'target_strategy': target_strategy, 'statistical_feedback': statistical_feedback, 'active_context_playbook': active_context_playbook }
119
- await self.r2_service.save_llm_prompts_async(
120
- symbol, 'sentry_watchlist_decision_v5', prompt, analysis_data
121
- )
122
 
123
- async with self.semaphore:
124
- response = await self._call_llm(prompt)
125
 
126
- decision_dict = self._parse_llm_response_enhanced(response, target_strategy, symbol)
127
 
128
- if decision_dict:
129
- if decision_dict.get('action') == 'WATCH' and 'strategy_to_watch' not in decision_dict:
130
- print(f" ⚠️ LLM {symbol}: Action is WATCH but strategy_to_watch is missing. Forcing HOLD.")
131
- decision_dict['action'] = 'HOLD'
132
-
133
- decision_dict['model_source'] = self.model_name
134
- decision_dict['whale_data_integrated'] = whale_data.get('data_available', False)
135
- return decision_dict
136
- else:
137
- print(f"❌ LLM parsing failed for {symbol} - no fallback decisions")
138
- return None
139
 
140
  except Exception as e:
141
- print(f"❌ Error in get_trading_decision for {data_payload.get('symbol', 'unknown')}: {e}"); traceback.print_exc()
142
- return None
143
-
144
- def _parse_llm_response_enhanced(self, response_text: str, fallback_strategy: str, symbol: str) -> dict:
145
  try:
146
- json_str = parse_json_from_response(response_text)
147
- if not json_str:
148
- print(f"❌ Failed to extract JSON from LLM response for {symbol}")
149
- return None
150
-
151
- decision_data = safe_json_parse(json_str)
152
- if not decision_data:
153
- print(f"❌ Failed to parse JSON (safe_json_parse) for {symbol}: {response_text}")
154
- return None
155
-
156
- if fallback_strategy == "reflection" or fallback_strategy == "distillation":
157
- return decision_data
158
 
159
- required_fields = ['action', 'reasoning', 'confidence_level', 'pattern_identified_by_llm']
160
-
161
- if decision_data.get('action') == 'WATCH':
162
- required_fields.append('strategy_to_watch')
163
- elif decision_data.get('action') == 'BUY': # (fallback for the legacy flow)
164
- required_fields.extend(['risk_assessment', 'stop_loss', 'take_profit', 'expected_target_minutes', 'exit_profile', 'exit_parameters'])
165
-
166
- if not validate_required_fields(decision_data, required_fields):
167
- print(f"❌ Missing required fields in LLM response for {symbol}")
168
- missing = [f for f in required_fields if f not in decision_data]
169
- print(f" MIA: {missing}")
170
- return None
171
 
172
- action = decision_data.get('action')
173
- if action not in ['WATCH', 'HOLD']:
174
- # (allow 'BUY' as a fallback if the model fails to follow 'WATCH')
175
- if action == 'BUY':
176
- print(f"⚠️ LLM {symbol} returned 'BUY' instead of 'WATCH'. Converting to 'WATCH'...")
177
- decision_data['action'] = 'WATCH'
178
- decision_data['strategy_to_watch'] = decision_data.get('strategy', fallback_strategy)
179
- else:
180
- print(f"⚠️ LLM suggested unsupported action ({action}) for {symbol}. Forcing HOLD.")
181
- decision_data['action'] = 'HOLD'
182
 
183
- if decision_data.get('action') == 'BUY': # (extra handling for the fallback case)
184
- decision_data['trade_type'] = 'LONG'
185
- else:
186
- decision_data['trade_type'] = None
187
 
188
- # (change: use 'strategy_to_watch' instead of 'strategy')
189
- strategy_value = decision_data.get('strategy_to_watch') if decision_data.get('action') == 'WATCH' else decision_data.get('strategy')
190
- if not strategy_value or strategy_value == 'unknown':
191
- decision_data['strategy'] = fallback_strategy
192
- if decision_data.get('action') == 'WATCH':
193
- decision_data['strategy_to_watch'] = fallback_strategy
194
 
195
- return decision_data
196
- except Exception as e:
197
- print(f"❌ Error parsing LLM response for {symbol}: {e}")
198
- return None
199
 
200
- async def _get_pattern_analysis(self, data_payload):
201
- try:
202
- symbol = data_payload['symbol']
203
- ohlcv_data = data_payload.get('raw_ohlcv') or data_payload.get('ohlcv')
204
- if ohlcv_data: return None
205
- return None
206
  except Exception as e:
207
- print(f"❌ Pattern analysis failed for {data_payload.get('symbol')}: {e}")
208
- return None
209
-
210
- def _create_comprehensive_sentry_prompt(
211
- self,
212
- payload: dict,
213
- news_text: str,
214
- pattern_analysis: dict,
215
- whale_data: dict,
216
- statistical_feedback: str,
217
- active_context_playbook: str
218
- ) -> str:
219
 
220
- symbol = payload.get('symbol', 'N/A')
221
- current_price = payload.get('current_price', 'N/A')
222
 
223
- price_change_24h_raw = payload.get('price_change_24h', 0)
224
- price_change_24h_display = f"{price_change_24h_raw:+.2f}%" if isinstance(price_change_24h_raw, (int, float)) else "N/A"
225
 
226
- reasons = payload.get('reasons_for_candidacy', [])
227
- sentiment_data = payload.get('sentiment_data', {})
228
- advanced_indicators = payload.get('advanced_indicators', {})
229
- strategy_scores = payload.get('strategy_scores', {})
230
- recommended_strategy = payload.get('recommended_strategy', 'N/A')
231
- target_strategy = payload.get('target_strategy', 'GENERIC')
232
- enhanced_final_score = payload.get('enhanced_final_score', 0)
233
- enhanced_score_display = f"{enhanced_final_score:.3f}" if isinstance(enhanced_final_score, (int, float)) else str(enhanced_final_score)
234
-
235
- indicators_summary = format_technical_indicators(advanced_indicators)
236
- strategies_summary = format_strategy_scores(strategy_scores, recommended_strategy)
237
- whale_analysis_section = format_whale_analysis_for_llm(whale_data)
238
- ohlcv_data = payload.get('raw_ohlcv') or payload.get('ohlcv', {})
239
- candle_data_section = self._format_candle_data_comprehensive(ohlcv_data)
240
- market_context_section = self._format_market_context(sentiment_data)
241
 
242
- statistical_feedback_section = f"🧠 STATISTICAL FEEDBACK (Slow-Learner):\n{statistical_feedback}"
243
- playbook_section = f"📚 LEARNING PLAYBOOK (Fast-Learner Active Rules):\n{active_context_playbook}"
244
 
245
- exhaustion_warning = ""
246
- try:
247
- rsi_1d = advanced_indicators.get('1d', {}).get('rsi', 50)
248
- rsi_4h = advanced_indicators.get('4h', {}).get('rsi', 50)
249
- if price_change_24h_raw > 40 and (rsi_1d > 75 or rsi_4h > 75):
250
- exhaustion_warning = (
251
- "🚩 **تنبيه استراتيجي: تم رصد زخم مرتفع (احتمال إرهاق)** 🚩\n"
252
- f"الأصل مرتفع {price_change_24h_display} خلال 24 ساعة ومؤشر RSI على 1D/4H في منطقة تشبع شرائي.\n"
253
- "هذا ليس أمر 'إيقاف'، بل هو 'تحدي تحليلي'. مهمتك هي التحقيق وتحديد ما إذا كان هذا:\n"
254
- "1. **فخ إرهاق (Exhaustion Trap):** (يجب 'HOLD')\n"
255
- "2. **اختراق استمراري حقيقي (Sustainable Continuation):** (يمكن 'WATCH')\n"
256
- "------------------------------------------------------------------"
257
- )
258
- except Exception:
259
- pass
260
-
261
-
262
- prompt = f"""
263
- COMPREHENSIVE STRATEGIC ANALYSIS FOR {symbol} (FOR SENTRY WATCHLIST)
264
-
265
- 🚨 IMPORTANT: You are a STRATEGIC EXPLORER. Your job is NOT to execute a trade. Your job is to decide if this asset is interesting enough to be passed to the "SENTRY" (a high-speed tactical agent) for real-time monitoring and execution.
266
-
267
- {exhaustion_warning}
268
-
269
- 🎯 STRATEGY CONTEXT:
270
- * Target Strategy: {target_strategy}
271
- * Recommended Strategy (from ML): {recommended_strategy}
272
- * Current Price: ${current_price}
273
- * 24H Price Change: {price_change_24h_display}
274
- * Enhanced System Score: {enhanced_score_display}
275
-
276
- --- LEARNING HUB INPUT (CRITICAL) ---
277
- {playbook_section}
278
- {statistical_feedback_section}
279
- --- END OF LEARNING INPUT ---
280
-
281
- 📊 TECHNICAL INDICATORS (ALL TIMEFRAMES):
282
- {indicators_summary}
283
-
284
- 📈 RAW CANDLE DATA SUMMARY & STATISTICS (FOR YOUR PATTERN ANALYSIS):
285
- {candle_data_section}
286
- {chr(10)}--- END OF CANDLE DATA ---{chr(10)}
287
-
288
- 🎯 STRATEGY ANALYSIS (System's recommendation based on various factors):
289
- {strategies_summary}
290
-
291
- 🐋 WHALE ACTIVITY ANALYSIS:
292
- {whale_analysis_section}
293
-
294
- 🌍 MARKET CONTEXT:
295
- {market_context_section if market_context_section and "No market context" not in market_context_section else "Market context data not available for this analysis."}
296
-
297
- ---
298
- 🎯 SENTRY DECISION INSTRUCTIONS (WATCH or HOLD):
299
-
300
- 1. **PERFORM CHART PATTERN ANALYSIS:** Based *ONLY* on the provided 'RAW CANDLE DATA SUMMARY & STATISTICS', identify relevant patterns.
301
- 2. **[CRITICAL] INVESTIGATE THE STRATEGIC ALERT (if present):**
302
- * **To verify Continuation:** Is the rally supported by a high 'volume_ratio' (available in the indicators)? Is it a clean breakout from an accumulation pattern (e.g., 'Bull Flag' or 'Consolidation Breakout')? Are the candles strong (large bodies)?
303
- * **To verify Exhaustion:** Do you see a 'Bearish Divergence' (price makes a new high while RSI/MACD does not)? Is 'volume_ratio' weakening as price rises? Are reversal candles (Doji, Shooting Star) appearing on 4H/1D?
304
- 3. **INTEGRATE ALL DATA:** Merge your investigation with the rest of the data (Learning Hub, Whale Activity).
305
- 4. **DECIDE ACTION (WATCH or HOLD):**
306
- * **WATCH:** Only if your investigation confirms a 'Sustainable Continuation' and you have high confidence (>= 0.75).
307
- * **HOLD:** If your investigation shows an 'Exhaustion Trap', or if the picture is unclear and high-risk.
308
- 5. **DEFINE STRATEGY:** If (and only if) action is 'WATCH', you MUST specify which strategy the Sentry should use (e.g., 'breakout_momentum', 'mean_reversion').
309
- 6. **SELF-CRITIQUE:** Justify your decision. Why is this strong enough for the Sentry?
310
- 7. **[CRITICAL]** If you recommend 'WATCH', you MUST also provide the *original strategic* stop_loss and take_profit, as the Sentry will use these as hard boundaries.
311
-
312
- OUTPUT FORMAT (JSON - SENTRY DECISION):
313
- {{
314
- "action": "WATCH/HOLD",
315
- "reasoning": "Detailed explanation integrating ALL data sources. If the Strategic Alert was present, *explicitly state your investigation findings* (e.g., 'I confirmed this is continuation because volume is increasing and a bull flag is forming on 1H...')",
316
- "pattern_identified_by_llm": "Name of the primary pattern(s) identified (e.g., 'Bull Flag on 1H', 'No Clear Pattern')",
317
-
318
- "confidence_level": 0.85,
319
- "strategy_to_watch": "breakout_momentum",
320
-
321
- "stop_loss": 0.000000,
322
- "take_profit": 0.000000,
323
- "exit_profile": "ATR_TRAILING",
324
- "exit_parameters": {{ "atr_multiplier": 2.0, "atr_period": 14, "break_even_trigger_percent": 1.5 }},
325
-
326
- "self_critique": {{
327
- "failure_modes": [
328
- "What is the first reason this 'WATCH' decision could fail? (e.g., 'The identified pattern is a false breakout.')",
329
- "What is the second reason? (e.g., 'The Sentry might enter too late.')"
330
- ],
331
- "confidence_adjustment_reason": "Brief reason if confidence was adjusted post-critique."
332
- }}
333
- }}
334
  """
335
- return prompt
336
-
337
- def _format_candle_data_comprehensive(self, ohlcv_data):
338
- if not ohlcv_data: return "No raw candle data available for analysis"
339
- try:
340
- timeframes_available = []; total_candles = 0
341
- for timeframe, candles in ohlcv_data.items():
342
- if candles and len(candles) >= 5: timeframes_available.append(f"{timeframe.upper()} ({len(candles)} candles)"); total_candles += len(candles)
343
- if not timeframes_available: return "Insufficient candle data across all timeframes"
344
- summary = f"📊 Available Timeframes: {', '.join(timeframes_available)}\n"
345
- summary += f"📈 Total Candles Available: {total_candles}\n\n"
346
- raw_candle_analysis_text = self.pattern_engine._format_chart_data_for_llm(ohlcv_data)
347
- summary += raw_candle_analysis_text
348
- return summary
349
- except Exception as e: return f"Error formatting raw candle data: {str(e)}"
350
 
351
- def _analyze_timeframe_candles(self, candles, timeframe):
352
- # (helper function - no changes)
353
- return "" # (shortened)
354
-
355
- def _format_market_context(self, sentiment_data):
356
- if not sentiment_data or sentiment_data.get('data_quality', 'LOW') == 'LOW': return "Market context data not available or incomplete."
357
- btc_sentiment = sentiment_data.get('btc_sentiment', 'N/A'); fear_greed = sentiment_data.get('fear_and_greed_index', 'N/A'); market_trend = sentiment_data.get('market_trend', 'N/A')
358
- lines = [f" Bitcoin Sentiment: {btc_sentiment}", f"• Fear & Greed Index: {fear_greed} ({sentiment_data.get('sentiment_class', 'Neutral')})", f"• Overall Market Trend: {market_trend.replace('_', ' ').title() if isinstance(market_trend, str) else 'N/A'}"]
359
- general_whale = sentiment_data.get('general_whale_activity', {});
360
- if general_whale and general_whale.get('sentiment') != 'NEUTRAL':
361
- whale_sentiment = general_whale.get('sentiment', 'N/A'); critical_alert = general_whale.get('critical_alert', False)
362
- lines.append(f"• General Whale Sentiment: {whale_sentiment.replace('_', ' ').title() if isinstance(whale_sentiment, str) else 'N/A'}");
363
- if critical_alert: lines.append(" ⚠️ CRITICAL WHALE ALERT ACTIVE")
364
- return "\n".join(lines)
365
-
366
-
367
- async def re_analyze_trade_async(self, trade_data: dict, processed_data: dict):
368
- try:
369
- symbol = trade_data['symbol']; original_strategy = trade_data.get('strategy', 'GENERIC')
370
- ohlcv_data = processed_data.get('raw_ohlcv') or processed_data.get('ohlcv')
371
- if not ohlcv_data: return None
372
- news_text = await self.news_fetcher.get_news_for_symbol(symbol)
373
- pattern_analysis = await self._get_pattern_analysis(processed_data)
374
- whale_data = processed_data.get('whale_data', {})
375
-
376
- statistical_feedback = "No statistical learning data yet."
377
- active_context_playbook = "No active learning rules available."
378
- if self.learning_hub and self.learning_hub.initialized:
379
- statistical_feedback = await self.learning_hub.get_statistical_feedback_for_llm(original_strategy)
380
- active_context_playbook = await self.learning_hub.get_active_context_for_llm(
381
- domain="strategy", query=f"{original_strategy} {symbol} re-analysis"
382
- )
383
-
384
- prompt = self._create_re_analysis_prompt(
385
- trade_data, processed_data, news_text, pattern_analysis,
386
- whale_data, statistical_feedback, active_context_playbook
387
- )
388
-
389
- if self.r2_service:
390
- analysis_data = { 'symbol': symbol, 'original_strategy': original_strategy }
391
- await self.r2_service.save_llm_prompts_async(
392
- symbol, 'trade_reanalysis_v5_hub', prompt, analysis_data
393
- )
394
-
395
- async with self.semaphore:
396
- response = await self._call_llm(prompt)
397
 
398
- # 🔴 --- START OF CHANGE (V5.4) --- 🔴
399
- # (pass the full original trade object so old values can be copied if needed)
400
- re_analysis_dict = self._parse_re_analysis_response(response, original_strategy, symbol, trade_data)
401
- # 🔴 --- END OF CHANGE --- 🔴
402
-
403
- if re_analysis_dict:
404
- re_analysis_dict['model_source'] = self.model_name
405
- return re_analysis_dict
406
- else:
407
- print(f"❌ LLM re-analysis parsing failed for {symbol}")
408
- return None
409
- except Exception as e:
410
- print(f"❌ Error in LLM re-analysis: {e}"); traceback.print_exc()
411
- return None
412
 
413
- # 🔴 --- START OF CHANGE (V5.4) --- 🔴
414
- def _parse_re_analysis_response(self, response_text: str, fallback_strategy: str, symbol: str, original_trade: dict) -> dict:
415
- """(محدث V5.4) يضمن عدم مسح قيم SL/TP أبداً."""
416
- try:
417
- json_str = parse_json_from_response(response_text)
418
- if not json_str: return None
419
- decision_data = safe_json_parse(json_str)
420
- if not decision_data: print(f"❌ Failed to parse JSON (safe_json_parse) for re-analysis of {symbol}: {response_text}"); return None
421
-
422
- action = decision_data.get('action')
423
- if action not in ['HOLD', 'CLOSE_TRADE', 'UPDATE_TRADE']:
424
- print(f"⚠️ LLM suggested unsupported re-analysis action ({action}) for {symbol}. Forcing HOLD.")
425
- decision_data['action'] = 'HOLD'
426
-
427
- # (new validation logic, V5.4)
428
- if action == 'UPDATE_TRADE':
429
- required_update_fields = ['new_stop_loss', 'new_take_profit', 'new_exit_profile', 'new_exit_parameters']
430
- if not validate_required_fields(decision_data, required_update_fields):
431
- print(f"❌ Missing required fields for UPDATE_TRADE for {symbol}"); decision_data['action'] = 'HOLD'
432
- elif not isinstance(decision_data['new_exit_parameters'], dict):
433
- print(f"❌ 'new_exit_parameters' is not a valid dict for {symbol}"); decision_data['action'] = 'HOLD'
434
-
435
- # (wipe-protection mechanism)
436
- if action in ['HOLD', 'UPDATE_TRADE']:
437
- # validate new_stop_loss
438
- new_sl = decision_data.get('new_stop_loss')
439
- if not isinstance(new_sl, (int, float)) or new_sl <= 0:
440
- print(f"⚠️ LLM Re-Analysis {symbol}: new_stop_loss is invalid ({new_sl}). Reverting to original SL.")
441
- decision_data['new_stop_loss'] = original_trade.get('stop_loss')
442
-
443
- # validate new_take_profit
444
- new_tp = decision_data.get('new_take_profit')
445
- if not isinstance(new_tp, (int, float)) or new_tp <= 0:
446
- print(f"⚠️ LLM Re-Analysis {symbol}: new_take_profit is invalid ({new_tp}). Reverting to original TP.")
447
- decision_data['new_take_profit'] = original_trade.get('take_profit')
448
-
449
- strategy_value = decision_data.get('strategy')
450
- if not strategy_value or strategy_value == 'unknown':
451
- decision_data['strategy'] = fallback_strategy
452
-
453
- return decision_data
454
- except Exception as e:
455
- print(f"Error parsing re-analysis response for {symbol}: {e}")
456
- return None
457
- # 🔴 --- END OF CHANGE --- 🔴
458
-
459
- def _create_re_analysis_prompt(
460
- self,
461
- trade_data: dict, processed_data: dict, news_text: str,
462
- pattern_analysis: dict, whale_data: dict,
463
- statistical_feedback: str, active_context_playbook: str
464
- ) -> str:
465
 
466
- symbol = trade_data.get('symbol', 'N/A'); entry_price = trade_data.get('entry_price', 'N/A'); current_price = processed_data.get('current_price', 'N/A'); strategy = trade_data.get('strategy', 'GENERIC'); current_exit_profile = trade_data.get('decision_data', {}).get('exit_profile', 'N/A'); current_exit_params = json.dumps(trade_data.get('decision_data', {}).get('exit_parameters', {}))
467
 
468
- # 🔴 --- START OF CHANGE (V5.4) --- 🔴
469
- # (pass the current targets to the model)
470
- current_sl = trade_data.get('stop_loss', 'N/A')
471
- current_tp = trade_data.get('take_profit', 'N/A')
472
- # 🔴 --- END OF CHANGE --- 🔴
473
-
474
- statistical_feedback_section = f"🧠 STATISTICAL FEEDBACK (Slow-Learner):\n{statistical_feedback}"; playbook_section = f"📚 LEARNING PLAYBOOK (Fast-Learner Active Rules):\n{active_context_playbook}"
475
- try: price_change = ((current_price - entry_price) / entry_price) * 100 if entry_price else 0; price_change_display = f"{price_change:+.2f}%"
476
477
 
478
- price_change_24h_raw = processed_data.get('price_change_24h', 0)
479
- price_change_24h_display = f"{price_change_24h_raw:+.2f}%" if isinstance(price_change_24h_raw, (int, float)) else "N/A"
 
480
 
481
- indicators_summary = format_technical_indicators(processed_data.get('advanced_indicators', {})); pattern_summary = self._format_pattern_analysis(pattern_analysis) if pattern_analysis else "Pattern analysis data not available for re-analysis."; whale_analysis_section = format_whale_analysis_for_llm(whale_data); market_context_section = self._format_market_context(processed_data.get('sentiment_data', {})); ohlcv_data = processed_data.get('raw_ohlcv') or processed_data.get('ohlcv', {}); candle_data_section = self._format_candle_data_comprehensive(ohlcv_data)
482
 
483
- exhaustion_warning = ""
484
- try:
485
- rsi_1d = processed_data.get('advanced_indicators', {}).get('1d', {}).get('rsi', 50)
486
- if price_change_24h_raw > 40 and rsi_1d > 75:
487
- exhaustion_warning = (
488
- "🚩 **تنبيه استراتيجي: تم رصد زخم مرتفع (احتمال إرهاق)** 🚩\n"
489
- f"الأصل مرتفع {price_change_24h_display} خلال 24 ساعة ومؤشر RSI على 1D/4H في منطقة تشبع شرائي.\n"
490
- "هذا يزيد من خطورة الاستمرار في الصفقة. قم بالتحقيق في قوة الاتجاه الحالية.\n"
491
- "هل بدأ الحجم (Volume) يضعف؟ هل تظهر إشارات انعكاس على 1H/4H؟\n"
492
- "------------------------------------------------------------------"
493
- )
494
- except Exception:
495
- pass
496
-
497
- # 🔴 --- START OF CHANGE (V5.4) --- 🔴
498
- prompt = f"""
499
- TRADE RE-ANALYSIS FOR {symbol} (SPOT ONLY - Currently Open LONG Position)
500
-
501
- {exhaustion_warning}
502
-
503
- 📊 CURRENT TRADE CONTEXT:
504
- * Strategy: {strategy}
505
- * Entry Price: {entry_price} (LONG position)
506
- * Current Price: {current_price}
507
- * 24H Price Change: {price_change_24h_display}
508
- * Current Performance: {price_change_display}
509
- * Current Exit Profile: {current_exit_profile}
510
- * CURRENT Stop Loss: {current_sl}
511
- * CURRENT Take Profit: {current_tp}
512
-
513
- --- LEARNING HUB INPUT (CRITICAL) ---
514
- {playbook_section}
515
- {statistical_feedback_section}
516
- --- END OF LEARNING INPUT ---
517
-
518
- 🔄 UPDATED TECHNICAL ANALYSIS:
519
- {indicators_summary}
520
-
521
- 📈 UPDATED RAW CANDLE DATA SUMMARY & STATISTICS:
522
- {candle_data_section}
523
- {chr(10)}--- END OF CANDLE DATA ---{chr(10)}
524
-
525
- 🐋 UPDATED WHALE ACTIVITY:
526
- {whale_analysis_section}
527
-
528
- 🌍 UPDATED MARKET CONTEXT:
529
- {market_context_section if market_context_section and "No market context" not in market_context_section else "Market context data not available for this re-analysis."}
530
-
531
- ---
532
- 🎯 RE-ANALYSIS INSTRUCTIONS (SPOT - LONG POSITION):
533
-
534
- 1. **Analyze the Data:** Review all new data (Indicators, Candles, Whale, Market).
535
- 2. **Evaluate Current State:** Is the original reason for entry still valid? Has the risk changed?
536
- 3. **Investigate Alert (if present):** If the 'Exhaustion Alert' is active, determine if the trend is weakening (exit) or just consolidating (hold).
537
- 4. **Decide Action (HOLD, CLOSE_TRADE, or UPDATE_TRADE):**
538
- * **HOLD:** The trade is still valid. The current strategy/targets are fine.
539
- * **CLOSE_TRADE:** The trade is invalidated (e.g., trend reversal, risk too high).
540
- * **UPDATE_TRADE:** The trade is valid, but the exit parameters need adjustment (e.g., raise stop loss to lock profit, or change exit profile).
541
-
542
- **[CRITICAL OUTPUT RULES - YOU MUST FOLLOW THESE]:**
543
- 1. You **MUST** return one of three actions: `HOLD`, `CLOSE_TRADE`, or `UPDATE_TRADE`.
544
- 2. If `action` is `HOLD` or `UPDATE_TRADE`, you **MUST** provide valid (non-zero) numeric values for `new_stop_loss` and `new_take_profit`.
545
- 3. **If `action` is `HOLD` and you do not want to change the targets,** you **MUST** return the *CURRENT* values (Current SL: {current_sl}, Current TP: {current_tp}) in the `new_stop_loss` and `new_take_profit` fields.
546
- 4. If `action` is `CLOSE_TRADE`, the values for targets/exit profile are irrelevant.
547
-
548
- OUTPUT FORMAT (JSON - SPOT RE-ANALYSIS):
549
- {{
550
- "action": "HOLD/CLOSE_TRADE/UPDATE_TRADE",
551
- "reasoning": "Comprehensive justification. If 'HOLD' or 'UPDATE', justify the new (or existing) SL/TP values.",
552
- "new_stop_loss": 0.000000,
553
- "new_take_profit": 0.000000,
554
- "new_exit_profile": "The new exit profile (or the existing one if HOLD)",
555
- "new_exit_parameters": {{ "example_key": "example_value" }},
556
- "new_expected_minutes": 15,
557
- "confidence_level": 0.85,
558
- "strategy": "{strategy}",
559
- "self_critique": {{
560
- "failure_modes": ["Primary risk of this new decision?", "Second risk?"],
561
- "confidence_adjustment_reason": "Brief reason if confidence was adjusted."
562
- }}
563
- }}
564
- """
565
- # 🔴 --- END OF CHANGE --- 🔴
566
- return prompt
567
 
568
- def _format_pattern_analysis(self, pattern_analysis):
569
- if not pattern_analysis or not pattern_analysis.get('pattern_detected') or pattern_analysis.get('pattern_detected') == 'no_clear_pattern': return "No clear chart pattern detected by the system."
570
- pattern = pattern_analysis.get('pattern_detected', 'N/A'); confidence = pattern_analysis.get('pattern_confidence', 0); direction = pattern_analysis.get('predicted_direction', 'N/A'); timeframe = pattern_analysis.get('timeframe', 'N/A'); tf_display = f"on {timeframe} timeframe" if timeframe != 'N/A' else ""
571
- return f"System Pattern Analysis: Detected '{pattern}' {tf_display} with {confidence:.2f} confidence. Predicted direction: {direction}."
 
 
572
 
573
- @_rate_limit_nvidia_api
574
- async def _call_llm(self, prompt: str) -> str:
575
- try:
576
- for attempt in range(2):
577
- try:
578
- response = self.client.chat.completions.create(
579
- model=self.model_name,
580
- messages=[{"role": "user", "content": prompt}],
581
- temperature=self.temperature,
582
- seed=int(time.time()),
583
- max_tokens=4000
584
- )
585
-
586
- content = None
587
- if response.choices and response.choices[0].message:
588
- content = response.choices[0].message.content
589
-
590
- if content and '{' in content and '}' in content:
591
- return content
592
- else:
593
- if content is None:
594
- print(f"⚠️ LLM returned NO content (None) (attempt {attempt+1}). Check content filters or API status.")
595
- else:
596
- print(f"⚠️ LLM returned invalid content (not JSON) (attempt {attempt+1}): {content[:100]}...")
597
-
598
- if attempt == 0: await asyncio.sleep(1)
599
-
600
- except (RateLimitError, APITimeoutError) as e:
601
- print(f"❌ LLM API Error (Rate Limit/Timeout): {e}. Retrying via backoff...")
602
- raise
603
- except Exception as e:
604
- print(f"❌ Unexpected LLM API error (attempt {attempt+1}): {e}")
605
- if attempt == 0: await asyncio.sleep(2)
606
- elif attempt == 1: raise
607
-
608
- print("❌ LLM failed to return valid content after retries.")
609
- return ""
610
-
611
- except Exception as e:
612
- print(f"❌ Final failure in _call_llm after backoff retries: {e}")
613
- raise
614
 
615
- print("✅ LLM Service loaded - V5.4 (Fixed Re-Analysis TP/SL Wipe Bug)")
1
+ # LLM.py (V18.1 - 24h Accumulation Analysis)
2
+ import os, traceback, json, time, re
3
+ import httpx
4
  from datetime import datetime
5
+ from typing import List, Dict, Any, Optional
6
+
7
+ try:
8
+ from r2 import R2Service
9
+ from learning_hub.hub_manager import LearningHubManager # (import the learning brain)
10
+ except ImportError:
11
+ print("❌ [LLMService] فشل استيراد R2Service أو LearningHubManager")
12
+ R2Service = None
13
+ LearningHubManager = None
14
+
15
+ # (V8.1) Import NewsFetcher
16
+ try:
17
+ from sentiment_news import NewsFetcher
18
+ except ImportError:
19
+ NewsFetcher = None
20
+
21
+ # Model settings
22
+ LLM_API_URL = os.getenv("LLM_API_URL", "https://api.groq.com/openai/v1/chat/completions")
23
+ LLM_API_KEY = os.getenv("LLM_API_KEY")
24
+ LLM_MODEL = os.getenv("LLM_MODEL", "llama3-70b-8192")
25
+
26
+ # Client settings
27
+ # (timeout raised to 5 minutes for complex analyses)
28
+ CLIENT_TIMEOUT = 300.0
29
 
30
  class LLMService:
31
+ def __init__(self):
32
+ if not LLM_API_KEY:
33
+ raise ValueError("❌ [LLMService] متغير بيئة LLM_API_KEY غير موجود!")
34
+
35
+ self.headers = {
36
+ "Authorization": f"Bearer {LLM_API_KEY}",
37
+ "Content-Type": "application/json"
38
+ }
39
+ self.http_client = httpx.AsyncClient(timeout=CLIENT_TIMEOUT)
40
+
41
+ # --- (links to the other services) ---
42
+ self.r2_service: Optional[R2Service] = None
43
+ self.learning_hub: Optional[LearningHubManager] = None
44
+
45
+ # (V8.1) Add NewsFetcher (used during re-analysis)
46
+ self.news_fetcher: Optional[NewsFetcher] = None
47
+
48
+ print(f"✅ [LLMService] مهيأ. النموذج: {LLM_MODEL}")
49
+
50
+ async def _call_llm(self, prompt: str, temperature: float = 0.2, max_tokens: int = 1500) -> Optional[str]:
51
+ """
52
+ (Updated) Perform the API call to the LLM (prompt in English).
53
+ """
54
+ payload = {
55
+ "model": LLM_MODEL,
56
+ "messages": [
57
+ {"role": "system", "content": "You are an expert AI trading analyst. You MUST respond ONLY with the requested JSON object, with no introductory text, markdown formatting (like ```json), or explanations."},
58
+ {"role": "user", "content": prompt}
59
+ ],
60
+ "temperature": temperature,
61
+ "max_tokens": max_tokens,
62
+ "top_p": 1,
63
+ "stream": False,
64
+ "response_format": {"type": "json_object"}
65
+ }
66
+
67
  try:
68
+ response = await self.http_client.post(LLM_API_URL, headers=self.headers, json=payload)
69
+ response.raise_for_status() # Check for HTTP 4xx/5xx errors
70
+
71
+ data = response.json()
72
+
73
+ if "choices" in data and len(data["choices"]) > 0:
74
+ content = data["choices"][0].get("message", {}).get("content")
75
+ if content:
76
+ return content.strip()
77
+
78
+ print(f"❌ [LLMService] استجابة API غي�� متوقعة: {data}")
79
+ return None
80
+
81
+ except httpx.HTTPStatusError as http_err:
82
+ print(f"❌ [LLMService] خطأ HTTP: {http_err} - {http_err.response.text}")
83
+ except httpx.RequestError as req_err:
84
+ print(f"❌ [LLMService] خطأ في الطلب: {req_err}")
85
+ except json.JSONDecodeError:
86
+ print(f"❌ [LLMService] فشل في تحليل استجابة JSON: {response.text}")
87
+ except Exception as e:
88
+ print(f"❌ [LLMService] خطأ غير متوقع في _call_llm: {e}")
89
+ traceback.print_exc()
90
+
91
+ return None
92
+
93
+ def _parse_llm_response_enhanced(self,
94
+ response_text: str,
95
+ fallback_strategy: str = "decision",
96
+ symbol: str = "N/A") -> Optional[Dict[str, Any]]:
97
+ """
98
+ (Updated V8) Smart, error-tolerant JSON parser.
99
+ """
100
+ if not response_text:
101
+ print(f" ⚠️ [LLMParser] الاستجابة فارغة لـ {symbol}.")
102
+ return self._get_fallback_response(fallback_strategy, "Empty response")
103
+
104
+ # 1. Try parsing the JSON directly (we requested response_format=json_object)
105
+ try:
106
+ return json.loads(response_text)
107
+ except json.JSONDecodeError:
108
+ print(f" ⚠️ [LLMParser] فشل تحليل JSON المباشر لـ {symbol}. محاولة استخراج JSON...")
109
+ pass # (الانتقال إلى المحاولة 2)
110
 
111
+ # 2. Try extracting JSON embedded in text (Fallback 1)
112
+ try:
113
+ # (look for the first { and the last })
114
+ json_match = re.search(r'\{.*\}', response_text, re.DOTALL)
115
+ if json_match:
116
+ json_string = json_match.group(0)
117
+ return json.loads(json_string)
118
+ else:
119
+ print(f" ⚠️ [LLMParser] لم يتم العثور على JSON مطابق لـ {symbol}.")
120
+ raise json.JSONDecodeError("No JSON object found in text", response_text, 0)
121
+ except json.JSONDecodeError as e:
122
+ print(f" ❌ [LLMParser] فشل الاستخراج النهائي لـ {symbol}. نص الاستجابة: {response_text[:200]}...")
123
+ return self._get_fallback_response(fallback_strategy, f"Final JSON parse fail: {e}")
124
+ except Exception as e:
125
+ print(f" ❌ [LLMParser] خطأ غير متوقع في المحلل لـ {symbol}: {e}")
126
+ return self._get_fallback_response(fallback_strategy, f"Unexpected parser error: {e}")
127
+
128
+ def _get_fallback_response(self, strategy: str, reason: str) -> Optional[Dict[str, Any]]:
129
+ """
130
+ (Updated V8) Return a safe response when the LLM fails.
131
+ """
132
+ print(f" 🚨 [LLMService] تفعيل الاستجابة الاحتياطية (Fallback) لاستراتيجية '{strategy}' (السبب: {reason})")
133
+
134
+ if strategy == "decision":
135
+ # (safe decision: do not trade)
136
+ return {
137
+ "action": "NO_DECISION",
138
+ "strategy_to_watch": "GENERIC",
139
+ "confidence_level": 0,
140
+ "reasoning": f"LLM analysis failed: {reason}",
141
+ "exit_profile": "Standard"
142
+ }
143
+ elif strategy == "reanalysis":
144
+ # (safe decision: keep the current trade)
145
+ return {
146
+ "action": "HOLD",
147
+ "strategy": "MAINTAIN_CURRENT",
148
+ "reasoning": f"LLM re-analysis failed: {reason}. Maintaining current trade strategy."
149
+ }
150
+ elif strategy == "reflection":
151
+ # (safe decision: do not create a learning rule)
152
+ return None # (prevents the Reflector from creating a delta)
153
+
154
+ elif strategy == "distillation":
155
+ # (safe decision: do not create distilled rules)
156
+ return None # (prevents the Curator from proceeding)
157
 
158
+ return None # (generic fallback)
159
+
160
+ async def get_trading_decision(self, candidate_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
161
+ """
162
+ (Updated V8.1)
163
+ Calls the LLM to make a strategic "WATCH" decision (Explorer Brain).
164
+ """
165
+ symbol = candidate_data.get('symbol', 'UNKNOWN')
166
+ try:
167
+ # 1. (Brain) Fetch the rules (deltas) from the Learning Hub
168
+ # (fetches the general rules plus strategy rules based on the candidate)
169
+ learning_context_prompt = "Playbook: No learning context available."
170
+ if self.learning_hub:
171
+ # (broad query to fetch the best global rules)
172
+ learning_context_prompt = await self.learning_hub.get_active_context_for_llm(
173
+ domain="general",
174
+ query=f"{symbol} strategy decision"
175
  )
176
 
177
+ # 2. Build the prompt (in English)
178
+ prompt = self._create_trading_prompt(candidate_data, learning_context_prompt)
179
+
180
+ # (optional: save the prompt for auditing)
181
  if self.r2_service:
182
+ await self.r2_service.save_llm_prompts_async(symbol, "trading_decision", prompt, candidate_data)
183
 
184
+ # 3. Call the LLM
185
+ response_text = await self._call_llm(prompt, temperature=0.1, max_tokens=1000)
186
 
187
+ # 4. Parse the response (using the smart parser)
188
+ decision_json = self._parse_llm_response_enhanced(
189
+ response_text,
190
+ fallback_strategy="decision",
191
+ symbol=symbol
192
+ )
193
 
194
+ return decision_json
195
 
196
  except Exception as e:
197
+ print(f"❌ [LLMService] فشل فادح في get_trading_decision لـ {symbol}: {e}")
198
+ traceback.print_exc()
199
+ return self._get_fallback_response("decision", str(e)) # (إرجاع قرار آمن)
200
+
201
+ async def re_analyze_trade_async(self, trade_data: Dict[str, Any], current_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
202
+ """
203
+ (Updated V8.1)
204
+ Calls the LLM to re-analyze an open trade (Reflector Brain).
205
+ """
206
+ symbol = trade_data.get('symbol', 'UNKNOWN')
207
  try:
208
+ # 1. (Brain) Fetch the rules (deltas) from the Learning Hub
209
+ learning_context_prompt = "Playbook: No learning context available."
210
+ if self.learning_hub:
211
+ # (targeted query for re-analysis)
212
+ learning_context_prompt = await self.learning_hub.get_active_context_for_llm(
213
+ domain="strategy",
214
+ query=f"{symbol} re-analysis {trade_data.get('strategy', 'GENERIC')}"
215
+ )
216
 
217
+ # 2. (V8.1) Fetch the latest news (using the dedicated NewsFetcher)
218
+ latest_news_text = "News data unavailable for re-analysis."
219
+ latest_news_score = 0.0
220
+ if self.news_fetcher:
221
+ # (this fetches the latest news in real time)
222
+ latest_news_text = await self.news_fetcher.get_news_for_symbol(symbol)
223
+ if self.news_fetcher.vader_analyzer and latest_news_text:
224
+ vader_scores = self.news_fetcher.vader_analyzer.polarity_scores(latest_news_text)
225
+ latest_news_score = vader_scores.get('compound', 0.0)
226
 
227
+ # (إضافة الأخبار المحدثة إلى البيانات الحالية)
228
+ current_data['latest_news_text'] = latest_news_text
229
+ current_data['latest_news_score'] = latest_news_score
230
+
231
+ # 3. Build the prompt (in English)
232
+ prompt = await self._create_reanalysis_prompt(trade_data, current_data, learning_context_prompt)
233
 
234
+ # (optional: save the prompt for auditing)
235
+ if self.r2_service:
236
+ await self.r2_service.save_llm_prompts_async(symbol, "trade_reanalysis", prompt, current_data)
 
237
 
238
+ # 4. Call the LLM
239
+ response_text = await self._call_llm(prompt, temperature=0.0, max_tokens=1000) # (temperature 0.0 for decisiveness)
240
 
241
+ # 5. Parse the response (using the smart parser)
242
+ decision_json = self._parse_llm_response_enhanced(
243
+ response_text,
244
+ fallback_strategy="reanalysis",
245
+ symbol=symbol
246
+ )
247
+
248
+ return decision_json
249
250
  except Exception as e:
251
+ print(f"❌ [LLMService] فشل فادح في re_analyze_trade_async لـ {symbol}: {e}")
252
+ traceback.print_exc()
253
+ return self._get_fallback_response("reanalysis", str(e)) # (إرجاع قرار آمن)
254
+
255
+ # --- (prompt-building helpers) ---
256
+ # (note: these prompts must always be written in English)
257
+
258
+ def _create_trading_prompt(self,
259
+ candidate_data: Dict[str, Any],
260
+ learning_context: str) -> str:
261
+ """
262
+ (Modified V18.1)
263
+ Build the prompt (in English) for the initial trading decision (Explorer).
264
+ """
265
+
266
+ symbol = candidate_data.get('symbol', 'N/A')
267
 
268
+ # --- 1. Extract ML data (Layer 1) ---
269
+ l1_score = candidate_data.get('enhanced_final_score', 0)
270
+ l1_reasons = candidate_data.get('reasons_for_candidacy', [])
271
+ pattern_data = candidate_data.get('pattern_analysis', {})
272
+ mc_data = candidate_data.get('monte_carlo_distribution', {})
273
 
274
+ # --- 2. Extract sentiment and news data (Layer 1) ---
275
+ # (V8.2) Fetch the learned statistical VADER data
276
+ news_text = candidate_data.get('news_text', 'No news text provided.')
277
+ news_score_raw = candidate_data.get('news_score_raw', 0.0) # (raw VADER score)
278
+ statistical_news_pnl = candidate_data.get('statistical_news_pnl', 0.0) # (learned statistical score)
279
 
280
+ # --- 3. Extract whale data (Layer 1) ---
281
+ # (note: this is the section being updated)
282
+ whale_data = candidate_data.get('whale_data', {})
283
+ whale_summary = whale_data.get('llm_friendly_summary', {})
284
+ exchange_flows = whale_data.get('exchange_flows', {})
285
 
286
+ whale_signal = whale_summary.get('recommended_action', 'HOLD')
287
+ whale_confidence = whale_summary.get('confidence', 0.3)
288
+ whale_reason = whale_summary.get('whale_activity_summary', 'No whale data.')
289
+
290
+ # (short-term data - assumed to come from the best learned window, e.g., 1h)
291
+ net_flow_usd = exchange_flows.get('net_flow_usd', 0.0)
292
+
293
+ # 🔴 --- START OF CHANGE (V18.1) --- 🔴
294
+ # (long-term data - from the new 24-hour analysis)
295
+ accumulation_data_24h = whale_data.get('accumulation_analysis_24h', {})
296
+ net_flow_24h_usd = accumulation_data_24h.get('net_flow_usd', 0.0)
297
+ total_inflow_24h_usd = accumulation_data_24h.get('to_exchanges_usd', 0.0)
298
+ total_outflow_24h_usd = accumulation_data_24h.get('from_exchanges_usd', 0.0)
299
+ relative_net_flow_24h_percent = accumulation_data_24h.get('relative_net_flow_percent', 0.0)
300
+ # 🔴 --- END OF CHANGE --- 🔴
301
 
302
+ # --- 4. Extract market data (Layer 0) ---
303
+ market_context = candidate_data.get('sentiment_data', {})
304
+ market_trend = market_context.get('market_trend', 'UNKNOWN')
305
+ btc_sentiment = market_context.get('btc_sentiment', 'UNKNOWN')
306
+
307
+ # --- 5. Build the prompt sections (in English) ---
308
+
309
+ # (learning)
310
+ playbook_prompt = f"""
311
+ --- START OF LEARNING PLAYBOOK ---
312
+ {learning_context}
313
+ --- END OF PLAYBOOK ---
314
+ """
315
+ # (ML summary)
316
+ ml_summary_prompt = f"""
317
+ 1. **ML Analysis (Score: {l1_score:.3f}):**
318
+ * Reasons: {', '.join(l1_reasons)}
319
+ * Chart Pattern: {pattern_data.get('pattern_detected', 'None')} (Conf: {pattern_data.get('pattern_confidence', 0):.2f})
320
+ * Monte Carlo (1h): {mc_data.get('probability_of_gain', 0):.1f}% chance of profit (Expected: {mc_data.get('expected_return_pct', 0):.2f}%)
321
+ """
322
+ # (news summary)
323
+ news_prompt = f"""
324
+ 2. **News & Sentiment Analysis:**
325
+ * Market Trend: {market_trend} (BTC: {btc_sentiment})
326
+ * VADER (Raw): {news_score_raw:.3f}
327
+ * Statistical PnL (Learned): {statistical_news_pnl:+.2f}%
328
+ * News Text: {news_text[:300]}...
329
+ """
330
+ # (whale summary - updated)
331
+ whale_activity_prompt = f"""
332
+ 3. **Whale Activity (Real-time Flow - Optimized Window):**
333
+ * Signal: {whale_signal} (Confidence: {whale_confidence:.2f})
334
+ * Reason: {whale_reason}
335
+ * Net Flow (to/from Exchanges): ${net_flow_usd:,.2f}
336
+
337
+ 4. **Whale Activity (24h Accumulation):**
338
+ * 24h Net Flow (Accumulation): ${net_flow_24h_usd:,.2f}
339
+ * 24h Total Deposits: ${total_inflow_24h_usd:,.2f}
340
+ * 24h Total Withdrawals: ${total_outflow_24h_usd:,.2f}
341
+ * Relative 24h Net Flow (vs Daily Volume): {relative_net_flow_24h_percent:+.2f}%
342
  """
343
 
344
+ prompt_sections = [
345
+ f"SYSTEM: You are an expert AI trading analyst (Explorer Brain). Analyze the provided data for {symbol} and decide if it's a high-potential candidate to 'WATCH'.",
346
+ playbook_prompt,
347
+ "--- START OF CANDIDATE DATA ---",
348
+ ml_summary_prompt,
349
+ news_prompt,
350
+ whale_activity_prompt,
351
+ "--- END OF CANDIDATE DATA ---",
352
+ """
353
+ TASK:
354
+ 1. Synthesize all data points (ML, News, Whale Flow, 24h Accumulation).
355
+ 2. Consult the "Playbook" for learned rules.
356
+ 3. Decide the final action: 'WATCH' (if high confidence) or 'NO_DECISION' (if low confidence or conflicting signals).
357
+ 4. If 'WATCH', define the 'strategy_to_watch' (e.g., 'Breakout', 'MeanReversion', 'WhaleAccumulation') and an appropriate 'exit_profile' (e.g., 'Aggressive', 'Standard', 'Patient').
358
+
359
+ OUTPUT (JSON Object ONLY):
360
+ {
361
+ "action": "WATCH" or "NO_DECISION",
362
+ "strategy_to_watch": "STRATEGY_NAME",
363
+ "confidence_level": 0.0_to_1.0,
364
+ "reasoning": "Brief justification (max 40 words) synthesizing all data points.",
365
+ "exit_profile": "Aggressive" or "Standard" or "Patient"
366
+ }
367
+ """
368
+ ]
369
+
370
+ return "\n".join(prompt_sections)
371
372
 
373
+ async def _create_reanalysis_prompt(self,
374
+ trade_data: Dict[str, Any],
375
+ current_data: Dict[str, Any],
376
+ learning_context: str) -> str:
377
+ """
378
+ (معدل V8.1)
379
+ إنشاء الـ Prompt (باللغة الإنجليزية) لإعادة تحليل صفقة مفتوحة (Reflector Brain).
380
+ """
381
 
382
+ symbol = trade_data.get('symbol', 'N/A')
383
 
384
+ # --- 1. Original (old) trade data ---
385
+ original_strategy = trade_data.get('strategy', 'N/A')
386
+ original_reasoning = trade_data.get('decision_data', {}).get('reasoning', 'N/A')
387
+ entry_price = trade_data.get('entry_price', 0)
388
+ current_pnl = trade_data.get('pnl_percent', 0)
389
+ current_sl = trade_data.get('stop_loss', 0)
390
+ current_tp = trade_data.get('take_profit', 0)
391
+
392
+ # --- 2. Updated (current) technical data ---
393
+ current_price = current_data.get('current_price', 0)
394
+ mc_data = current_data.get('monte_carlo_distribution', {})
395
+ mc_prob = mc_data.get('probability_of_gain', 0)
396
+ mc_expected_return = mc_data.get('expected_return_pct', 0)
397
 
398
+ # --- 3. (V8.1) Updated (current) news data ---
399
+ latest_news_text = current_data.get('latest_news_text', 'No news.')
400
+ latest_news_score = current_data.get('latest_news_score', 0.0)
401
 
402
+ # --- 4. (Brain) Statistical learning data ---
403
+ statistical_feedback = ""
404
+ if self.learning_hub:
405
+ statistical_feedback = await self.learning_hub.get_statistical_feedback_for_llm(original_strategy)
406
 
407
+ # --- 5. Build the prompt sections (in English) ---
408
 
409
+ playbook_prompt = f"""
410
+ --- START OF LEARNING PLAYBOOK ---
411
+ {learning_context}
412
+ {statistical_feedback}
413
+ --- END OF PLAYBOOK ---
414
+ """
415
 
416
+ trade_status_prompt = f"""
417
+ 1. **Open Trade Status ({symbol}):**
418
+ * Current PnL: {current_pnl:+.2f}%
419
+ * Original Strategy: {original_strategy}
420
+ * Original Reasoning: {original_reasoning}
421
+ * Entry Price: {entry_price}
422
+ * Current Price: {current_price}
423
+ * Current StopLoss: {current_sl}
424
+ * Current TakeProfit: {current_tp}
425
+ """
426
 
427
+ current_analysis_prompt = f"""
428
+ 2. **Current Real-time Analysis:**
429
+ * Monte Carlo (1h): {mc_prob:.1f}% chance of profit (Expected: {mc_expected_return:.2f}%)
430
+ * Latest News (VADER: {latest_news_score:.3f}): {latest_news_text[:300]}...
431
+ """
432
+
433
+ prompt_sections = [
434
+ f"SYSTEM: You are an expert AI trading analyst (Reflector Brain). An open trade for {symbol} has triggered a mandatory re-analysis. Analyze the new data and decide the next action.",
435
+ playbook_prompt,
436
+ "--- START OF TRADE DATA ---",
437
+ trade_status_prompt,
438
+ current_analysis_prompt,
439
+ "--- END OF TRADE DATA ---",
440
+ """
441
+ TASK:
442
+ 1. Analyze the "Open Trade Status" against the "Current Real-time Analysis".
443
+ 2. Consult the "Playbook" for learned rules and statistical feedback.
444
+ 3. Decide the best course of action:
445
+ * 'HOLD': Maintain the current strategy (if analysis is still valid or neutral).
446
+ * 'UPDATE_TRADE': Adjust StopLoss/TakeProfit (if risk/reward changed).
447
+ * 'CLOSE_TRADE': Immediately close the trade (if analysis is invalidated or high risk).
448
+ 4. You MUST provide new SL/TP targets if action is 'UPDATE_TRADE'.
449
+
450
+ OUTPUT (JSON Object ONLY):
451
+ {
452
+ "action": "HOLD" or "UPDATE_TRADE" or "CLOSE_TRADE",
453
+ "strategy": "MAINTAIN_CURRENT" or "ADAPTIVE_EXIT" or "IMMEDIATE_EXIT",
454
+ "reasoning": "Brief justification (max 40 words) for the decision.",
455
+ "new_stop_loss": (float or null, required if action is 'UPDATE_TRADE'),
456
+ "new_take_profit": (float or null, required if action is 'UPDATE_TRADE')
457
+ }
458
+ """
459
+ ]
460
+
461
+ return "\n".join(prompt_sections)