Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
- # app.py (Fully updated
+ # app.py (Fully updated with timeout-safe re_analyze_open_trade_async)
import os
import traceback
import signal
@@ -18,10 +18,8 @@ try:
from data_manager import DataManager
from ml_engine.processor import MLProcessor

- #
- # (Import the new hub manager instead of the old engine)
+ # (Import the new hub manager)
from learning_hub.hub_manager import LearningHubManager
- # 🔴 --- END OF CHANGE --- 🔴

from sentiment_news import SentimentAnalyzer
from trade_manager import TradeManager
@@ -35,9 +33,7 @@ except ImportError as e:
r2_service_global = None
data_manager_global = None
llm_service_global = None
-
- learning_hub_global = None # (Changed from learning_engine_global)
- # 🔴 --- END OF CHANGE --- 🔴
+ learning_hub_global = None
trade_manager_global = None
sentiment_analyzer_global = None
symbol_whale_monitor_global = None
@@ -50,9 +46,7 @@ class StateManager:
self.initialization_error = None
self.services_initialized = {
'r2_service': False, 'data_manager': False, 'llm_service': False,
-
- 'learning_hub': False, # (Changed from learning_engine)
- # 🔴 --- END OF CHANGE --- 🔴
+ 'learning_hub': False,
'trade_manager': False, 'sentiment_analyzer': False,
'symbol_whale_monitor': False
}
@@ -80,9 +74,7 @@ state_manager = StateManager()
async def initialize_services():
"""تهيئة جميع الخدمات بشكل منفصل"""
global r2_service_global, data_manager_global, llm_service_global
-
- global learning_hub_global # (Changed from learning_engine_global)
- # 🔴 --- END OF CHANGE --- 🔴
+ global learning_hub_global
global trade_manager_global, sentiment_analyzer_global
global symbol_whale_monitor_global
try:
@@ -106,8 +98,6 @@ async def initialize_services():
state_manager.set_service_initialized('sentiment_analyzer');
print(" ✅ محلل المشاعر مهيأ")

- # 🔴 --- START OF CHANGE --- 🔴
- # (Initialize the new Learning Hub Manager)
print(" 🔄 تهيئة محور التعلم (Learning Hub)...");
learning_hub_global = LearningHubManager(
r2_service=r2_service_global,
@@ -118,28 +108,25 @@ async def initialize_services():
state_manager.set_service_initialized('learning_hub');
print(" ✅ محور التعلم (Hub) مهيأ")

- # (Connect the Hub to the LLM service)
llm_service_global.learning_hub = learning_hub_global
state_manager.set_service_initialized('llm_service');
print(" ✅ LLMService مهيأة (ومربوطة بمحور التعلم)")

print(" 🔄 تهيئة مدير الصفقات...");
- # (Pass the new Learning Hub to the Trade Manager)
trade_manager_global = TradeManager(
r2_service_global,
- learning_hub_global,
+ learning_hub_global,
data_manager_global,
state_manager
)
state_manager.set_service_initialized('trade_manager');
print(" ✅ مدير الصفقات مهيأ (ومربوط بمحور التعلم)")
-
-
+
print("🎯 اكتملت تهيئة جميع الخدمات بنجاح"); return True
except Exception as e: error_msg = f"فشل تهيئة الخدمات: {str(e)}"; print(f"❌ {error_msg}"); state_manager.set_initialization_error(error_msg); return False

async def monitor_market_async():
- """
+ """مراقبة السوق"""
global data_manager_global, sentiment_analyzer_global
try:
if not await state_manager.wait_for_initialization(): print("❌ فشل تهيئة الخدمات - إيقاف مراقبة السوق"); return
@@ -160,8 +147,6 @@ async def monitor_market_async():
except Exception as error: print(f"❌ خطأ أثناء مراقبة السوق: {error}"); state.MARKET_STATE_OK = True; await asyncio.sleep(60)
except Exception as e: print(f"❌ فشل تشغيل مراقبة السوق: {e}")

- # 🔴 --- START OF CHANGE --- 🔴
- # (New background task for periodic distillation - Point 6)
async def run_periodic_distillation():
"""
Runs the Learning Hub's distillation process periodically.
@@ -185,10 +170,12 @@ async def run_periodic_distillation():
print(f"❌ [Scheduler] Error in periodic distillation task: {e}")
traceback.print_exc()
await asyncio.sleep(60 * 60) # (Wait 1 hour on error)
- # 🔴 --- END OF CHANGE --- 🔴

async def process_batch_parallel(batch, ml_processor, batch_num, total_batches, preloaded_whale_data):
- """
+ """
+ (معدلة) معالجة دفعة من الرموز بشكل متوازي وإرجاع نتائج مفصلة
+ - تستخدم بيانات الحيتان المحملة مسبقًا
+ """
try:
batch_tasks = []
for symbol_data in batch:
@@ -222,13 +209,18 @@ async def process_batch_parallel(batch, ml_processor, batch_num, total_batches,
return {'success': successful_results, 'low_score': low_score_results, 'failures': failed_results}

except Exception as error:
- print(f"❌ [
+ print(f"❌ [المستهلك] خطأ في معالجة الدفعة {batch_num}: {error}")
return {'success': [], 'low_score': [], 'failures': []}


async def run_3_layer_analysis():
"""
- (
+ (معدلة) تشغيل النظام الطبقي (مع فصل جلب الحيتان)
+ الطبقة 1: data_manager - الفحص السريع
+ الطبقة 1.5: جلب بيانات الحيتان بشكل منفصل (غير معرقل)
+ الطبقة 2: MLProcessor - التحليل المتدفق (يستخدم مونت كارلو المرحلة 1)
+ الطبقة 2.5: (جديد) مونت كارلو (المرحلة 2+3) لأفضل 10 عملات
+ الطبقة 3: LLMService - النموذج الضخم (يستخدم نتائج المرحلة 2.5)
"""

layer1_candidates = []
@@ -240,20 +232,20 @@ async def run_3_layer_analysis():
preloaded_whale_data_dict = {}

try:
- print("🎯
+ print("🎯 بدء النظام الطبقي المكون من 3 طبقات (مع فصل جلب الحيتان)...")

- if not await state_manager.wait_for_initialization(): print("❌
+ if not await state_manager.wait_for_initialization(): print("❌ الخدمات غير مهيأة بالكامل"); return None

- #
- print("\n🔍
+ # الطبقة 1
+ print("\n🔍 الطبقة 1: الفحص السريع (data_manager)...")
layer1_candidates = await data_manager_global.layer1_rapid_screening()
- if not layer1_candidates: print("❌
- print(f"✅
+ if not layer1_candidates: print("❌ لم يتم العثور على مرشحين في الطبقة 1"); return None
+ print(f"✅ تم اختيار {len(layer1_candidates)} عملة للطبقة 2")
layer1_symbols = [c['symbol'] for c in layer1_candidates]

- #
+ # الطبقة 1.5: جلب بيانات الحيتان
start_whale_fetch = time.time()
- print(f"\n🐋
+ print(f"\n🐋 الطبقة 1.5: بدء جلب بيانات الحيتان لـ {len(layer1_symbols)} عملة (بشكل غير معرقل)...")
async def fetch_whale_data_task(symbols, results_dict):
WHALE_FETCH_CONCURRENCY = 3
semaphore = asyncio.Semaphore(WHALE_FETCH_CONCURRENCY)
@@ -264,59 +256,68 @@ async def run_3_layer_analysis():
data = await data_manager_global.get_whale_data_for_symbol(symbol)
if data: results_dict[symbol] = data
except Exception as e:
+ print(f" ❌ [Whale Fetch] فشل جلب بيانات الحيتان لـ {symbol}: {e}")
results_dict[symbol] = {'data_available': False, 'error': str(e)}
for symbol in symbols: tasks.append(asyncio.create_task(get_data_with_semaphore(symbol)))
await asyncio.gather(*tasks)
whale_fetcher_task = asyncio.create_task(fetch_whale_data_task(layer1_symbols, preloaded_whale_data_dict))
+ print(" ⏳ مهمة جلب بيانات الحيتان تعمل في الخلفية...")

- # (
+ # إعداد المنتج/المستهلك (OHLCV/ML)
DATA_QUEUE_MAX_SIZE = 2
ohlcv_data_queue = asyncio.Queue(maxsize=DATA_QUEUE_MAX_SIZE)
ml_results_list = []
market_context = await data_manager_global.get_market_context_async()
-
- # 🔴 --- START OF CHANGE --- 🔴
- # (Pass the global Learning Hub to the MLProcessor)
ml_processor = MLProcessor(market_context, data_manager_global, learning_hub_global)
- # 🔴 --- END OF CHANGE --- 🔴
-
batch_size = 15
total_batches = (len(layer1_candidates) + batch_size - 1) // batch_size
-
-
+ print(f" 🚀 إعداد المنتج/المستهلك (OHLCV/ML): {total_batches} دفعة متوقعة (بحجم {batch_size})")
+
+ # وظيفة المستهلك (ML Consumer)
async def ml_consumer_task(queue: asyncio.Queue, results_list: list, whale_data_store: dict):
batch_num = 0
while True:
try:
batch_data = await queue.get()
- if batch_data is None: queue.task_done(); break
+ if batch_data is None: queue.task_done(); print(" 🛑 [ML Consumer] تلقى إشارة التوقف."); break
batch_num += 1
+ print(f" 📬 [ML Consumer] استلم دفعة OHLCV {batch_num}/{total_batches} ({len(batch_data)} عملة)")
+ # 🔴 هنا يتم استخدام مونت كارلو (المرحلة 1) السريع
batch_results_dict = await process_batch_parallel(
batch_data, ml_processor, batch_num, total_batches, whale_data_store
)
results_list.append(batch_results_dict)
queue.task_done()
-
+ print(f" ✅ [ML Consumer] أكمل معالجة الدفعة {batch_num}/{total_batches}")
+ except Exception as e: print(f"❌ [ML Consumer] خطأ فادح: {e}"); traceback.print_exc(); queue.task_done()

- # (
+ # تشغيل المستهلك (ML Consumer) والمنتج (OHLCV Producer)
+ print(" ▶️ [ML Consumer] بدء تشغيل مهمة المستهلك...")
consumer_task = asyncio.create_task(ml_consumer_task(ohlcv_data_queue, ml_results_list, preloaded_whale_data_dict))
+ print(" ▶️ [OHLCV Producer] بدء تشغيل مهمة المنتج (تدفق بيانات OHLCV)...")
producer_task = asyncio.create_task(data_manager_global.stream_ohlcv_data(layer1_symbols, ohlcv_data_queue))
-
-
+
+ # انتظار انتهاء المنتج والمستهلك
+ await producer_task; print(" ✅ [OHLCV Producer] أنهى جلب جميع بيانات OHLCV.")
await ohlcv_data_queue.join()
- await consumer_task;
+ await consumer_task; print(" ✅ [ML Consumer] أنهى معالجة جميع الدفعات.")

- #
+ # انتظار اكتمال مهمة جلب الحيتان (مع Timeout)
+ print(" ⏳ انتظار اكتمال مهمة جلب بيانات الحيتان (بحد أقصى للمهلة)...")
WHALE_FETCH_TIMEOUT_SECONDS = 180
try:
await asyncio.wait_for(whale_fetcher_task, timeout=WHALE_FETCH_TIMEOUT_SECONDS)
+ end_whale_fetch = time.time()
+ print(f" ✅ اكتمل جلب بيانات الحيتان في {end_whale_fetch - start_whale_fetch:.2f} ثانية. تم جلب/محاولة جلب بيانات لـ {len(preloaded_whale_data_dict)} عملة.")
except asyncio.TimeoutError:
-
+ end_whale_fetch = time.time()
+ print(f" ⚠️ انتهت مهلة انتظار جلب بيانات الحيتان ({WHALE_FETCH_TIMEOUT_SECONDS} ثانية)! تم جلب/محاولة جلب بيانات لـ {len(preloaded_whale_data_dict)} عملة حتى الآن.")
except Exception as whale_task_err:
-
+ end_whale_fetch = time.time()
+ print(f" ❌ حدث خطأ غير متوقع أثناء انتظار مهمة جلب الحيتان: {whale_task_err}")

- #
- print("🔄
+ # تجميع النتائج
+ print("🔄 تجميع جميع النتائج...")
for batch_result in ml_results_list:
for success_item in batch_result['success']:
symbol = success_item['symbol']
@@ -329,127 +330,215 @@ async def run_3_layer_analysis():
layer2_candidates.append(success_item)
all_low_score_candidates.extend(batch_result['low_score'])
all_failed_candidates.extend(batch_result['failures'])
+
+ print(f"✅ اكتمل التحليل المتقدم (MC-Phase1): {len(layer2_candidates)} نجاح (عالي) | {len(all_low_score_candidates)} نجاح (منخفض) | {len(all_failed_candidates)} فشل")
+ if not layer2_candidates: print("❌ لم يتم العثور على مرشحين في الطبقة 2")

-
-
- # (Sort and Filter - Unchanged)
+ # الترتيب والفلترة (بناءً على الدرجة التي تتضمن MC-Phase1)
layer2_candidates.sort(key=lambda x: x.get('enhanced_final_score', 0), reverse=True)
target_count = min(10, len(layer2_candidates))
final_layer2_candidates = layer2_candidates[:target_count]
+ print(f"🎯 تم اختيار {len(final_layer2_candidates)} عملة للطبقة 2.5 (الأقوى فقط)")
+
+ # 🔴 --- بدء الطبقة 2.5: التحليل المتقدم (GARCH+LGBM) --- 🔴
+ print(f"\n🔬 الطبقة 2.5: تشغيل التحليل المتقدم (GARCH+LGBM) على أفضل {len(final_layer2_candidates)} مرشح...")
+ advanced_mc_analyzer = ml_processor.monte_carlo_analyzer # الحصول على محلل مونت كارلو

- # (Layer 2.5: Advanced MC - Unchanged)
- print(f"\n🔬 Layer 2.5: Running Advanced MC (GARCH+LGBM) on top {len(final_layer2_candidates)} candidates...")
- advanced_mc_analyzer = ml_processor.monte_carlo_analyzer
updated_candidates_for_llm = []
for candidate in final_layer2_candidates:
symbol = candidate.get('symbol', 'UNKNOWN')
try:
+ print(f" 🔄 [Advanced MC] تحليل {symbol}...")
+ # استدعاء الدالة الجديدة المتقدمة
advanced_mc_results = await advanced_mc_analyzer.generate_1h_distribution_advanced(
candidate.get('ohlcv')
)
+
if advanced_mc_results and advanced_mc_results.get('simulation_model') == 'Phase2_GARCH_LGBM':
+ print(f" ✅ [Advanced MC] {symbol} - تم التحديث بنموذج GARCH/LGBM.")
+ # استبدال نتائج المرحلة 1 بنتائج المرحلة 2+3
candidate['monte_carlo_distribution'] = advanced_mc_results
candidate['monte_carlo_probability'] = advanced_mc_results.get('probability_of_gain', 0)
- candidate['advanced_mc_run'] = True
+ candidate['advanced_mc_run'] = True # إضافة علامة للتدقيق
else:
-
+ print(f" ⚠️ [Advanced MC] {symbol} - فشل التحليل المتقدم، استخدام نتائج المرحلة 1.")
+ candidate['advanced_mc_run'] = False # إضافة علامة للتدقيق
+
updated_candidates_for_llm.append(candidate)
+
except Exception as e:
- print(f" ❌ [Advanced MC] {symbol} -
+ print(f" ❌ [Advanced MC] {symbol} - خطأ فادح: {e}. استخدام نتائج المرحلة 1.")
candidate['advanced_mc_run'] = False
updated_candidates_for_llm.append(candidate)
-
+
+ final_layer2_candidates = updated_candidates_for_llm # استخدام القائمة المحدثة للطبقة 3
+ # 🔴 --- نهاية الطبقة 2.5 --- 🔴

await r2_service_global.save_candidates_async(final_layer2_candidates)
-
+ print("\n🏆 أفضل 10 عملات (بعد التدقيق) جاهزة للطبقة 3:")
+ for i, candidate in enumerate(final_layer2_candidates):
+ score=candidate.get('enhanced_final_score',0); strategy=candidate.get('target_strategy','GENERIC'); mc_dist=candidate.get('monte_carlo_distribution'); pattern=candidate.get('pattern_analysis',{}).get('pattern_detected','no_pattern'); timeframes=candidate.get('successful_timeframes',0); symbol=candidate.get('symbol','UNKNOWN')
+ print(f" {i+1}. {symbol}: 📊 {score:.3f} | الأطر: {timeframes}/6")
+
+ if mc_dist:
+ mc_model = mc_dist.get('simulation_model', 'Phase1')
+ mc_pi_90 = mc_dist.get('prediction_interval_90', [0,0])
+ mc_var = mc_dist.get('risk_metrics', {}).get('VaR_95_value', 0)
+ print(f" 🎯 مونت كارلو ({mc_model}): 90% PI [{mc_pi_90[0]:.4f} - {mc_pi_90[1]:.4f}] | VaR: ${mc_var:.4f}")
+
+ print(f" 🎯 استراتيجية: {strategy} | نمط: {pattern}")
+ whale_data = candidate.get('whale_data')
+ if whale_data and whale_data.get('data_available'): signal = whale_data.get('trading_signal', {}); print(f" 🐋 حيتان: {signal.get('action', 'HOLD')} (ثقة: {signal.get('confidence', 0):.2f}){' ⚠️' if signal.get('critical_alert') else ''}")
+ elif whale_data and whale_data.get('error'): print(f" 🐋 حيتان: خطأ ({whale_data.get('error')[:50]}...)")

- #
- print("\n🧠
+ # الطبقة 3
+ print("\n🧠 الطبقة 3: التحليل بالنموذج الضخم (LLMService)...")
for candidate in final_layer2_candidates:
try:
- symbol = candidate['symbol']
+ symbol = candidate['symbol']; print(f" 🤔 تحليل {symbol} بالنموذج الضخم (بيانات MC متقدمة)...")
ohlcv_data = candidate.get('ohlcv');
- if not ohlcv_data: continue
+ if not ohlcv_data: print(f" ⚠️ لا توجد بيانات شموع لـ {symbol}"); continue
candidate['raw_ohlcv'] = ohlcv_data
- total_candles = sum(len(data) for data in ohlcv_data.values()) if ohlcv_data else 0
- if total_candles < 30: continue
+ timeframes_count = candidate.get('successful_timeframes', 0); total_candles = sum(len(data) for data in ohlcv_data.values()) if ohlcv_data else 0
+ if total_candles < 30: print(f" ⚠️ بيانات شموع غير كافية لـ {symbol}: {total_candles} شمعة فقط"); continue
+ print(f" 📊 إرسال {symbol} للنموذج: {total_candles} شمعة في {timeframes_count} إطار زمني")

-
- candidate['sentiment_data'] = await data_manager_global.get_market_context_async()
+ candidate['sentiment_data'] = await data_manager_global.get_market_context_async()

llm_analysis = await llm_service_global.get_trading_decision(candidate)

if llm_analysis and llm_analysis.get('action') in ['BUY']:
-
- # (Save the 'candidate' object, which is the full context for the Reflector)
- opportunity={
- 'symbol': symbol,
- 'current_price': candidate.get('current_price', 0),
- 'decision': llm_analysis,
- 'enhanced_score': candidate.get('enhanced_final_score', 0),
- 'llm_confidence': llm_analysis.get('confidence_level', 0),
- 'strategy': llm_analysis.get('strategy', 'GENERIC'),
- 'analysis_timestamp': datetime.now().isoformat(),
- 'decision_context_data': candidate # (This is the snapshot)
- }
- # 🔴 --- END OF CHANGE --- 🔴
+ opportunity={'symbol': symbol, 'current_price': candidate.get('current_price', 0), 'decision': llm_analysis, 'enhanced_score': candidate.get('enhanced_final_score', 0), 'llm_confidence': llm_analysis.get('confidence_level', 0), 'strategy': llm_analysis.get('strategy', 'GENERIC'), 'analysis_timestamp': datetime.now().isoformat(), 'timeframes_count': timeframes_count, 'total_candles': total_candles, 'decision_context_data': candidate.copy()} # (نسخ السياق)
final_opportunities.append(opportunity)
- print(f" ✅ {symbol}: {llm_analysis.get('action')} -
- else:
-
- except Exception as e: print(f"❌ Error in LLM analysis for {candidate.get('symbol')}: {e}"); traceback.print_exc(); continue
+ print(f" ✅ {symbol}: {llm_analysis.get('action')} - ثقة: {llm_analysis.get('confidence_level', 0):.2f} (ملف خروج: {llm_analysis.get('exit_profile')})")
+ else: action = llm_analysis.get('action', 'NO_DECISION') if llm_analysis else 'NO_RESPONSE'; print(f" ⚠️ {symbol}: لا يوجد قرار تداول من النموذج الضخم ({action})")
+ except Exception as e: print(f"❌ خطأ في تحليل النموذج الضخم لـ {candidate.get('symbol')}: {e}"); traceback.print_exc(); continue

if final_opportunities:
final_opportunities.sort(key=lambda x: (x['llm_confidence'] + x['enhanced_score']) / 2, reverse=True)
+ print(f"\n🏆 النظام الطبقي اكتمل: {len(final_opportunities)} فرصة تداول")
+ for i, opportunity in enumerate(final_opportunities[:5]): print(f" {i+1}. {opportunity['symbol']}: {opportunity['decision'].get('action')} - ثقة: {opportunity['llm_confidence']:.2f} - أطر: {opportunity['timeframes_count']}")

- # (Audit log saving
+ # (Audit log saving - unchanged)
+ try:
+ top_10_detailed_summary = []
+ for c in final_layer2_candidates:
+ whale_summary = "Not Available"; whale_data = c.get('whale_data')
+ if whale_data and whale_data.get('data_available'): signal = whale_data.get('trading_signal', {}); action = signal.get('action', 'HOLD'); confidence = signal.get('confidence', 0); reason_preview = signal.get('reason', 'N/A')[:75] + "..." if signal.get('reason') else 'N/A'; whale_summary = f"Action: {action}, Conf: {confidence:.2f}, Alert: {signal.get('critical_alert', False)}, Reason: {reason_preview}"
+ elif whale_data and whale_data.get('error'): whale_summary = f"Error: {whale_data['error'][:50]}..."
+ mc_summary = "N/A"
+ mc_dist = c.get('monte_carlo_distribution')
+ if mc_dist:
+ mc_model = mc_dist.get('simulation_model', 'Unknown')
+ if mc_model == 'Phase2_GARCH_LGBM':
+ drift = mc_dist.get('forecasted_drift_lgbm', 0); vol = mc_dist.get('forecasted_vol_garch', 0); mc_summary = f"Phase2_GARCH(vol={vol:.5f})_LGBM(drift={drift:.5f})"
+ else:
+ var_val = mc_dist.get('risk_metrics', {}).get('VaR_95_value', 0); mc_summary = f"{mc_model}_VaR({var_val:.4f})"
+ top_10_detailed_summary.append({ "symbol": c.get('symbol'), "score": c.get('enhanced_final_score', 0), "timeframes": f"{c.get('successful_timeframes', 'N/A')}/6", "whale_data_summary": whale_summary, "strategy": c.get('target_strategy', 'N/A'), "pattern": c.get('pattern_analysis', {}).get('pattern_detected', 'N/A'), "mc_analysis_level": mc_summary })
+ other_successful_candidates = layer2_candidates[target_count:]; other_success_summary = [{"symbol": c['symbol'], "score": c.get('enhanced_final_score', 0), "timeframes": f"{c.get('successful_timeframes', 'N/A')}/6", "whale_data": "Available" if c.get('whale_data', {}).get('data_available') else ("Error" if c.get('whale_data', {}).get('error') else "Not Available")} for c in other_successful_candidates]; low_score_summary = [{"symbol": c['symbol'], "score": c.get('enhanced_final_score', 0), "timeframes": f"{c.get('successful_timeframes', 'N/A')}/6", "whale_data": "Available" if c.get('whale_data', {}).get('data_available') else ("Error" if c.get('whale_data', {}).get('error') else "Not Available")} for c in all_low_score_candidates]
+ audit_data = { "timestamp": datetime.now().isoformat(), "total_layer1_candidates": len(layer1_candidates), "total_processed_in_layer2": len(layer2_candidates) + len(all_low_score_candidates) + len(all_failed_candidates), "counts": {"sent_to_llm": len(final_layer2_candidates), "success_not_top_10": len(other_successful_candidates), "success_low_score": len(all_low_score_candidates), "failures": len(all_failed_candidates)}, "top_candidates_for_llm": top_10_detailed_summary, "other_successful_candidates": other_success_summary, "low_score_candidates": low_score_summary, "failed_candidates": all_failed_candidates, }
+ await r2_service_global.save_analysis_audit_log_async(audit_data)
+ print(f"✅ تم حفظ سجل تدقيق التحليل في R2 (مع تفاصيل MC المتقدمة).")
+ except Exception as audit_error: print(f"❌ فشل حفظ سجل تدقيق التحليل: {audit_error}"); traceback.print_exc()

- if not final_opportunities: print("❌
+ if not final_opportunities: print("❌ لم يتم العثور على فرص تداول مناسبة"); return None
return final_opportunities[0] if final_opportunities else None

except Exception as error:
- print(f"❌
+ print(f"❌ خطأ فادح في النظام الطبقي: {error}"); traceback.print_exc()
+ try:
+ audit_data = { "timestamp": datetime.now().isoformat(), "status": "FAILED", "error": str(error), "traceback": traceback.format_exc(), "total_layer1_candidates": len(layer1_candidates), "counts": {"sent_to_llm": 0, "success_not_top_10": 0, "success_low_score": len(all_low_score_candidates), "failures": len(all_failed_candidates)}, "failed_candidates": all_failed_candidates }
+ await r2_service_global.save_analysis_audit_log_async(audit_data)
+ print("⚠️ تم حفظ سجل تدقيق جزئي بعد الفشل.")
+ except Exception as audit_fail_error: print(f"❌ فشل حفظ سجل التدقيق أثناء معالجة خطأ آخر: {audit_fail_error}")
return None

+ # 🔴 --- START OF CHANGE --- 🔴
+ # (This function is now hardened with timeouts and correct queue handling)
async def re_analyze_open_trade_async(trade_data):
- """
+ """إعادة تحليل الصفقة المفتوحة (نسخة محصنة ضد التعليق)"""
symbol = trade_data.get('symbol')
+
+ # تعريف المهلات الزمنية (بالثواني)
+ OHLCV_PRODUCER_TIMEOUT = 30.0
+ OHLCV_QUEUE_TIMEOUT = 10.0
+ WHALE_DATA_TIMEOUT = 20.0
+ LLM_REANALYSIS_TIMEOUT = 60.0
+
try:
async with state_manager.trade_analysis_lock:
- print(f"🔄 [Re-Analyze]
+ print(f"🔄 [Re-Analyze] بدء التحليل الاستراتيجي لـ {symbol}...")
market_context = await data_manager_global.get_market_context_async()
+
+ # (إصلاح نمط المنتج/المستهلك لبيانات OHLCV)
ohlcv_data_list = []
- temp_queue = asyncio.Queue()
-
-
-
-
-
-
-
-
-
-
-
-
+ temp_queue = asyncio.Queue(maxsize=2)
+
+ print(f" 🔄 [Re-Analyze] بدء جلب بيانات OHLCV لـ {symbol} (مهلة {OHLCV_PRODUCER_TIMEOUT} ثانية)...")
+ producer_task = asyncio.create_task(
+ data_manager_global.stream_ohlcv_data([symbol], temp_queue)
+ )
+
+ try:
+ # (تشغيل المنتج والمستهلك بالتوازي مع مهلة زمنية)
+ async with asyncio.timeout(OHLCV_PRODUCER_TIMEOUT):
+ while True:
+ try:
+ # (انتظار المستهلك مع مهلة أقصر)
+ batch = await asyncio.wait_for(temp_queue.get(), timeout=OHLCV_QUEUE_TIMEOUT)
+
+ if batch is None: # (هذا يعمل الآن بسبب الإصلاح في data_manager.py)
+ temp_queue.task_done()
+ print(" ✅ [Re-Analyze] تلقى إشارة إنهاء OHLCV.")
+ break # الخروج من حلقة المستهلك
+
+ ohlcv_data_list.extend(batch)
+ temp_queue.task_done()
+
+ except asyncio.TimeoutError:
+ print(f" ⚠️ [Re-Analyze] مهلة انتظار طابور OHLCV لـ {symbol}. (المنتج قد يكون معلقاً)")
+ if producer_task.done():
+ break # المنتج انتهى والطابور فارغ
+
+ except asyncio.TimeoutError:
+ print(f" ❌ [Re-Analyze] فشل جلب OHLCV بالكامل (مهلة {OHLCV_PRODUCER_TIMEOUT} ثانية). إلغاء المهمة.")
+ if not producer_task.done():
+ producer_task.cancel()
+
+ await producer_task # (التأكد من انتهاء المنتج)
+
+ if not ohlcv_data_list:
+ print(f"⚠️ فشل جلب بيانات إعادة التحليل لـ {symbol}");
+ return None
+
ohlcv_data = ohlcv_data_list[0]

l1_data = await data_manager_global._get_detailed_symbol_data(symbol)
if l1_data: ohlcv_data.update(l1_data); ohlcv_data['reasons_for_candidacy'] = ['re-analysis']

-
+ # (إضافة مهلة زمنية لمكالمة الحيتان)
+ re_analysis_whale_data = None
+ try:
+ print(f" 🔄 [Re-Analyze] بدء جلب بيانات الحيتان لـ {symbol} (مهلة {WHALE_DATA_TIMEOUT} ثانية)...")
+ re_analysis_whale_data = await asyncio.wait_for(
+ data_manager_global.get_whale_data_for_symbol(symbol),
+ timeout=WHALE_DATA_TIMEOUT
+ )
+ print(f" ✅ [Re-Analyze] تم جلب بيانات الحيتان.")
+ except asyncio.TimeoutError:
+ print(f" ⚠️ [Re-Analyze] مهلة جلب بيانات الحيتان لـ {symbol}. المتابعة بدونها.")
+ except Exception as whale_e:
+ print(f" ❌ [Re-Analyze] خطأ في جلب بيانات الحيتان: {whale_e}. المتابعة بدونها.")

- # 🔴 --- START OF CHANGE --- 🔴
- # (Pass the global Learning Hub to the MLProcessor)
ml_processor = MLProcessor(market_context, data_manager_global, learning_hub_global)
- # 🔴 --- END OF CHANGE --- 🔴

- print(f"🔄 [Re-Analyze]
+ print(f"🔄 [Re-Analyze] استخدام مونت كارلو (Phase 2+3) لـ {symbol}...")
advanced_mc_results = await ml_processor.monte_carlo_analyzer.generate_1h_distribution_advanced(
ohlcv_data.get('ohlcv')
)

processed_data = await ml_processor.process_and_score_symbol_enhanced(ohlcv_data, {symbol: re_analysis_whale_data} if re_analysis_whale_data else {})
+
if not processed_data: return None

if advanced_mc_results:
@@ -460,39 +549,61 @@ async def re_analyze_open_trade_async(trade_data):
processed_data['ohlcv'] = processed_data['raw_ohlcv']
processed_data['sentiment_data'] = market_context

-
+ # (إضافة مهلة زمنية لمكالمة النموذج الضخم)
+ re_analysis_decision = None
+ try:
+ print(f" 🔄 [Re-Analyze] بدء استدعاء النموذج الضخم لـ {symbol} (مهلة {LLM_REANALYSIS_TIMEOUT} ثانية)...")
+ re_analysis_decision = await asyncio.wait_for(
+ llm_service_global.re_analyze_trade_async(trade_data, processed_data),
+ timeout=LLM_REANALYSIS_TIMEOUT
+ )
+ except asyncio.TimeoutError:
+ print(f" ❌ [Re-Analyze] مهلة استدعاء النموذج الضخم لـ {symbol}. اتخاذ قرار HOLD افتراضي.")
+ return None
+ except Exception as llm_e:
+ print(f" ❌ [Re-Analyze] خطأ في استدعاء النموذج الضخم: {llm_e}.")
+ return None

if re_analysis_decision:
await r2_service_global.save_system_logs_async({ "trade_reanalyzed": True, "symbol": symbol, "action": re_analysis_decision.get('action'), 'strategy': re_analysis_decision.get('strategy', 'GENERIC') })
- print(f"✅ [Re-Analyze]
+ print(f"✅ [Re-Analyze] اكتمل التحليل الاستراتيجي لـ {symbol}. القرار: {re_analysis_decision.get('action')}")
return {"symbol": symbol, "decision": re_analysis_decision, "current_price": processed_data.get('current_price')}
- else:
-
+ else:
+ print(f" ⚠️ [Re-Analyze] النموذج الضخم لم يُرجع قراراً لـ {symbol}.")
+ return None
+
+ except Exception as error:
+ await r2_service_global.save_system_logs_async({ "reanalysis_error": True, "symbol": symbol, "error": str(error) });
+ print(f"❌ Error in re_analyze_open_trade_async for {symbol}: {error}");
+ traceback.print_exc();
+ return None
+ # 🔴 --- END OF CHANGE --- 🔴


async def run_bot_cycle_async():
- """
+ """دورة التداول الرئيسية"""
try:
- if not await state_manager.wait_for_initialization(): print("❌
- print("🔄
- if not r2_service_global.acquire_lock(): print("❌
+ if not await state_manager.wait_for_initialization(): print("❌ الخدمات غير مهيأة بالكامل - تخطي الدورة"); return
+ print("🔄 بدء دورة التداول..."); await r2_service_global.save_system_logs_async({"cycle_started": True})
+ if not r2_service_global.acquire_lock(): print("❌ فشل الحصول على القفل - تخطي الدورة"); return

open_trades = []
try:
- open_trades = await trade_manager_global.get_open_trades(); print(f"📋
+ open_trades = await trade_manager_global.get_open_trades(); print(f"📋 الصفقات المفتوحة: {len(open_trades)}")
+ should_look_for_new_trade = len(open_trades) == 0
if open_trades:
now = datetime.now()
trades_to_reanalyze = [t for t in open_trades if now >= datetime.fromisoformat(t.get('expected_target_time', now.isoformat()))]
if trades_to_reanalyze:
- print(f"🔄
+ print(f"🔄 إعادة تحليل {len(trades_to_reanalyze)} صفقة (باستخدام MC المتقدم)")
reanalysis_results = await asyncio.gather(*[re_analyze_open_trade_async(trade) for trade in trades_to_reanalyze], return_exceptions=True)
for i, result in enumerate(reanalysis_results):
trade = trades_to_reanalyze[i]
- if isinstance(result, Exception): print(f" ❌
- elif result and result['decision'].get('action') == "CLOSE_TRADE": print(f" ✅
- elif result and result['decision'].get('action') == "UPDATE_TRADE": print(f" ✅
- elif result: print(f" ℹ️
- else: print(f" ⚠️
+ if isinstance(result, Exception): print(f" ❌ فشل إعادة تحليل {trade.get('symbol')}: {result}")
+ elif result and result['decision'].get('action') == "CLOSE_TRADE": print(f" ✅ إغلاق {trade.get('symbol')} بناءً على إعادة التحليل."); await trade_manager_global.close_trade(trade, result['current_price'], 'CLOSED_BY_REANALYSIS');
+ elif result and result['decision'].get('action') == "UPDATE_TRADE": print(f" ✅ تحديث {trade.get('symbol')} بناءً على إعادة التحليل."); await trade_manager_global.update_trade(trade, result['decision'])
+ elif result: print(f" ℹ️ الاحتفاظ بـ {trade.get('symbol')} بناءً على إعادة التحليل.")
+ else: print(f" ⚠️ إعادة تحليل {trade.get('symbol')} لم تنتج قرارًا.")

current_open_trades_count = len(await trade_manager_global.get_open_trades())
should_look_for_new_trade = current_open_trades_count == 0
@@ -500,22 +611,19 @@ async def run_bot_cycle_async():
if should_look_for_new_trade:
portfolio_state = await r2_service_global.get_portfolio_state_async(); current_capital = portfolio_state.get("current_capital_usd", 0)
if current_capital > 1:
- print("🎯
+ print("🎯 البحث عن فرص تداول جديدة (نظام MC ثنائي المراحل)...")
best_opportunity = await run_3_layer_analysis()
-
- # 🔴 --- START OF CHANGE --- 🔴
- # (Pass the decision_context to open_trade for the Reflector)
if best_opportunity:
- print(f"✅
+ print(f"✅ فتح صفقة جديدة: {best_opportunity['symbol']}")

symbol = best_opportunity['symbol']
decision = best_opportunity['decision']
price = best_opportunity['current_price']

- # (
+ # (استخراج اللقطة (snapshot) التي تم حفظها)
context_data = best_opportunity.get('decision_context_data', {})

- # (
+ # (بناء اللقطة (snapshot) للمنعكس (Reflector))
decision_context_snapshot = {
"market": context_data.get('sentiment_data', {}),
"indicators": context_data.get('advanced_indicators', {}),
@@ -531,14 +639,13 @@ async def run_bot_cycle_async():
price,
decision_context=decision_context_snapshot
)
-
-
-
- else: print("ℹ️ A trade is already open, skipping new trade search.")
+ else: print("❌ لم يتم العثور على فرص تداول مناسبة")
+ else: print("❌ رأس المال غير كافي لفتح صفقات جديدة")
+ else: print("ℹ️ يوجد صفقة مفتوحة بالفعل، تخطي البحث عن صفقة جديدة.")
finally:
if r2_service_global.lock_acquired: r2_service_global.release_lock()
await r2_service_global.save_system_logs_async({ "cycle_completed": True, "open_trades": len(open_trades)})
- print("✅
+ print("✅ اكتملت دورة التداول")

except Exception as error:
print(f"❌ Unhandled error in main cycle: {error}"); traceback.print_exc()
@@ -547,24 +654,21 @@ async def run_bot_cycle_async():

@asynccontextmanager
async def lifespan(application: FastAPI):
- """
- print("🚀
+ """إدارة دورة حياة التطبيق"""
+ print("🚀 بدء تهيئة التطبيق...")
try:
success = await initialize_services()
- if not success: print("❌
-
+ if not success: print("❌ فشل تهيئة التطبيق - إغلاق..."); yield; return
asyncio.create_task(monitor_market_async())
asyncio.create_task(trade_manager_global.start_trade_monitoring())

- #
- # (Start the new periodic distillation task)
+ # (بدء مهمة التقطير الدوري الجديدة)
asyncio.create_task(run_periodic_distillation())
- # 🔴 --- END OF CHANGE --- 🔴

await r2_service_global.save_system_logs_async({"application_started": True})
- print("🎯
- print(" -> 📈
- print(" -> 🧠
+ print("🎯 التطبيق جاهز للعمل - نظام الطبقات 3 (محور التعلم V4) فعال")
+ print(" -> 📈 المراقب التكتيكي (Dynamic Exit) نشط الآن")
+ print(" -> 🧠 التقطير الدوري (Curator) مجدول الآن")
yield
except Exception as error:
print(f"❌ Application startup failed: {error}");
@@ -576,48 +680,41 @@ async def lifespan(application: FastAPI):
await cleanup_on_shutdown()


- application = FastAPI(lifespan=lifespan, title="AI Trading Bot", description="
+ application = FastAPI(lifespan=lifespan, title="AI Trading Bot", description="نظام تداول ذكي بتحليل مونت كارلو ثنائي المراحل (GARCH+LGBM) مع محور تعلم تشغيلي (Reflexion Architecture)", version="4.0.0")

- # (Endpoints remain unchanged)
@application.get("/")
- async def root(): return {"message": "
+ async def root(): return {"message": "مرحباً بك في نظام التداول الذكي", "system": "3-Layer Analysis System (Learning Hub Enabled)", "status": "running" if state_manager.initialization_complete else "initializing", "timestamp": datetime.now().isoformat()}
@application.get("/run-cycle")
async def run_cycle_api():
- if not state_manager.initialization_complete: raise HTTPException(status_code=503, detail="
+ if not state_manager.initialization_complete: raise HTTPException(status_code=503, detail="الخدمات غير مهيأة بالكامل")
asyncio.create_task(run_bot_cycle_async())
return {"message": "Bot cycle initiated (Learning Hub Enabled)", "system": "3-Layer Analysis"}
@application.get("/health")
- async def health_check(): return {"status": "healthy" if state_manager.initialization_complete else "initializing", "initialization_complete": state_manager.initialization_complete, "services_initialized": state_manager.services_initialized, "initialization_error": state_manager.initialization_error, "timestamp": datetime.now().isoformat(), "system_architecture": "3-Layer Analysis (Learning Hub V4.0)"}
+ async def health_check(): return {"status": "healthy" if state_manager.initialization_complete else "initializing", "initialization_complete": state_manager.initialization_complete, "services_initialized": state_manager.services_initialized, "initialization_error": state_manager.initialization_error, "timestamp": datetime.now().isoformat(), "system_architecture": "3-Layer Analysis System (Learning Hub V4.0)"}
@application.get("/analyze-market")
async def analyze_market_api():
- if not state_manager.initialization_complete: raise HTTPException(status_code=503, detail="
+ if not state_manager.initialization_complete: raise HTTPException(status_code=503, detail="الخدمات غير مهيأة بالكامل")
result = await run_3_layer_analysis()
if result: return {"opportunity_found": True, "symbol": result['symbol'], "action": result['decision'].get('action'), "confidence": result['llm_confidence'], "strategy": result['strategy'], "exit_profile": result['decision'].get('exit_profile')}
else: return {"opportunity_found": False, "message": "No suitable opportunities found"}
@application.get("/portfolio")
async def get_portfolio_api():
- if not state_manager.initialization_complete: raise HTTPException(status_code=503, detail="
+ if not state_manager.initialization_complete: raise HTTPException(status_code=503, detail="الخدمات غير مهيأة بالكامل")
try: portfolio_state = await r2_service_global.get_portfolio_state_async(); open_trades = await trade_manager_global.get_open_trades(); return {"portfolio": portfolio_state, "open_trades": open_trades, "timestamp": datetime.now().isoformat()}
- except Exception as e: raise HTTPException(status_code=500, detail=f"
+ except Exception as e: raise HTTPException(status_code=500, detail=f"خطأ في جلب بيانات المحفظة: {str(e)}")
@application.get("/system-status")
async def get_system_status(): monitoring_status = trade_manager_global.get_monitoring_status() if trade_manager_global else {}; return {"initialization_complete": state_manager.initialization_complete, "services_initialized": state_manager.services_initialized, "initialization_error": state_manager.initialization_error, "market_state_ok": state.MARKET_STATE_OK, "monitoring_status": monitoring_status, "timestamp": datetime.now().isoformat()}

async def cleanup_on_shutdown():
- # 🔴 --- START OF CHANGE --- 🔴
global r2_service_global, data_manager_global, trade_manager_global, learning_hub_global
- # 🔴 --- END OF CHANGE --- 🔴
-
print("🛑 Shutdown signal received. Cleaning up...")
if trade_manager_global: trade_manager_global.stop_monitoring(); print("✅ Trade monitoring stopped")

- # 🔴 --- START OF CHANGE --- 🔴
- # (Call the new hub's shutdown method)
if learning_hub_global and learning_hub_global.initialized:
try:
await learning_hub_global.shutdown()
print("✅ Learning hub data saved")
except Exception as e: print(f"❌ Failed to save learning hub data: {e}")
- # 🔴 --- END OF CHANGE --- 🔴

if data_manager_global: await data_manager_global.close(); print("✅ Data manager closed")
if r2_service_global:
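
Note on the pattern behind the timeout-hardened re_analyze_open_trade_async above: the consumer drains an asyncio.Queue under an overall deadline, uses a shorter per-read timeout so a stalled producer is noticed early, and stops on a None sentinel. The following is a minimal, self-contained sketch of that pattern only, not the app's real code; it assumes Python 3.11+ for asyncio.timeout, and fake_producer and the batch values are placeholders rather than the real data_manager.stream_ohlcv_data API.

import asyncio

async def fake_producer(queue: asyncio.Queue) -> None:
    # Placeholder producer: pushes two batches, then a None sentinel.
    for batch in ([1, 2], [3, 4]):
        await queue.put(batch)
    await queue.put(None)

async def drain_with_deadline(overall_timeout: float = 30.0,
                              per_read_timeout: float = 10.0) -> list:
    queue: asyncio.Queue = asyncio.Queue(maxsize=2)
    items: list = []
    producer_task = asyncio.create_task(fake_producer(queue))
    try:
        # Overall deadline for the whole fetch (Python 3.11+).
        async with asyncio.timeout(overall_timeout):
            while True:
                try:
                    # Shorter timeout per queue read so a stalled producer is noticed early.
                    batch = await asyncio.wait_for(queue.get(), timeout=per_read_timeout)
                except asyncio.TimeoutError:
                    if producer_task.done():
                        break  # producer finished and the queue stayed empty
                    continue   # producer still running; keep waiting under the outer deadline
                queue.task_done()
                if batch is None:
                    break      # sentinel: producer signalled end of stream
                items.extend(batch)
    except TimeoutError:
        # Overall deadline hit: stop the producer and return what was collected so far.
        producer_task.cancel()
    return items

if __name__ == "__main__":
    print(asyncio.run(drain_with_deadline()))

Run as-is it prints [1, 2, 3, 4]; the same shape (outer deadline, inner per-read timeout, sentinel check, producer cancellation on the outer timeout) is what the updated re-analysis function applies to the OHLCV stream.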
|