import os
import sys
import json
import time
import signal
import asyncio
import traceback
from contextlib import asynccontextmanager
from datetime import datetime

import uvicorn
from fastapi import FastAPI, HTTPException

try:
    from r2 import R2Service
    from LLM import LLMService
    from data_manager import DataManager
    from ml_engine.processor import MLProcessor
    from learning_engine import LearningEngine
    from sentiment_news import SentimentAnalyzer
    from trade_manager import TradeManager
    import state
    from helpers import safe_float_conversion, validate_candidate_data_enhanced
except ImportError as e:
    print(f"❌ Module import error: {e}")
    sys.exit(1)

# Global service instances, created during application startup
r2_service_global = None
data_manager_global = None
llm_service_global = None
learning_engine_global = None
trade_manager_global = None
sentiment_analyzer_global = None
symbol_whale_monitor_global = None


class StateManager:
    """Tracks service initialization state and shared asyncio locks."""

    def __init__(self):
        self.market_analysis_lock = asyncio.Lock()
        self.trade_analysis_lock = asyncio.Lock()
        self.initialization_complete = False
        self.initialization_error = None
        self.services_initialized = {
            'r2_service': False, 'data_manager': False, 'llm_service': False,
            'learning_engine': False, 'trade_manager': False, 'sentiment_analyzer': False,
            'symbol_whale_monitor': False
        }

    async def wait_for_initialization(self, timeout=60):
        start_time = time.time()
        while not self.initialization_complete and (time.time() - start_time) < timeout:
            if self.initialization_error:
                raise Exception(f"Initialization failed: {self.initialization_error}")
            await asyncio.sleep(2)
        if not self.initialization_complete:
            raise Exception(f"Initialization timed out ({timeout} seconds)")
        return self.initialization_complete

    def set_service_initialized(self, service_name):
        self.services_initialized[service_name] = True
        if all(self.services_initialized.values()):
            self.initialization_complete = True
            print("🎯 All services fully initialized")

    def set_initialization_error(self, error):
        self.initialization_error = error
        print(f"❌ Initialization error: {error}")


state_manager = StateManager()

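# Note: the shared StateManager instance above is the readiness gate for every
# background task in this module. initialize_services() marks each service via
# set_service_initialized(), and the long-running coroutines (market monitor,
# bot cycle, layered analysis) call wait_for_initialization() before touching
# the global service objects.
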
async def initialize_services():
    """Initialize all services one by one."""
    global r2_service_global, data_manager_global, llm_service_global
    global learning_engine_global, trade_manager_global, sentiment_analyzer_global
    global symbol_whale_monitor_global
    try:
        print("🚀 Starting service initialization...")

        print("   🔄 Initializing R2Service...")
        r2_service_global = R2Service()
        state_manager.set_service_initialized('r2_service')
        print("   ✅ R2Service initialized")

        print("   🔄 Loading the contracts database...")
        contracts_database = await r2_service_global.load_contracts_db_async()
        print(f"   ✅ Loaded {len(contracts_database)} contracts")

        print("   🔄 Initializing the whale monitor...")
        try:
            from whale_news_data import EnhancedWhaleMonitor
            symbol_whale_monitor_global = EnhancedWhaleMonitor(contracts_database, r2_service_global)
            state_manager.set_service_initialized('symbol_whale_monitor')
            print("   ✅ Whale monitor initialized")
        except Exception as e:
            print(f"   ⚠️ Whale monitor initialization failed: {e}")
            symbol_whale_monitor_global = None

        print("   🔄 Initializing DataManager...")
        data_manager_global = DataManager(contracts_database, symbol_whale_monitor_global)
        await data_manager_global.initialize()
        state_manager.set_service_initialized('data_manager')
        print("   ✅ DataManager initialized")

        print("   🔄 Initializing LLMService...")
        llm_service_global = LLMService()
        llm_service_global.r2_service = r2_service_global

        print("   🔄 Initializing the sentiment analyzer...")
        sentiment_analyzer_global = SentimentAnalyzer(data_manager_global)
        state_manager.set_service_initialized('sentiment_analyzer')
        print("   ✅ Sentiment analyzer initialized")

        print("   🔄 Initializing the learning engine...")
        learning_engine_global = LearningEngine(r2_service_global, data_manager_global)
        await learning_engine_global.initialize_enhanced()
        state_manager.set_service_initialized('learning_engine')
        print("   ✅ Learning engine initialized")

        # Link the LLM service to the learning engine before marking it ready
        llm_service_global.learning_engine = learning_engine_global
        state_manager.set_service_initialized('llm_service')
        print("   ✅ LLMService initialized (linked to the learning engine)")

        print("   🔄 Initializing the trade manager...")
        trade_manager_global = TradeManager(
            r2_service_global,
            learning_engine_global,
            data_manager_global,
            state_manager
        )
        state_manager.set_service_initialized('trade_manager')
        print("   ✅ Trade manager initialized (aware of system state)")

        print("🎯 All services initialized successfully")
        return True
    except Exception as e:
        error_msg = f"Service initialization failed: {str(e)}"
        print(f"❌ {error_msg}")
        state_manager.set_initialization_error(error_msg)
        return False


async def monitor_market_async():
    """Monitor overall market conditions."""
    global data_manager_global, sentiment_analyzer_global
    try:
        if not await state_manager.wait_for_initialization():
            print("❌ Service initialization failed - stopping market monitoring")
            return
        while True:
            try:
                async with state_manager.market_analysis_lock:
                    market_context = await sentiment_analyzer_global.get_market_sentiment()
                    if not market_context:
                        state.MARKET_STATE_OK = True
                        await asyncio.sleep(60)
                        continue
                    bitcoin_sentiment = market_context.get('btc_sentiment')
                    fear_greed_index = market_context.get('fear_and_greed_index')
                    should_halt_trading, halt_reason = False, ""
                    if bitcoin_sentiment == 'BEARISH' and (fear_greed_index is not None and fear_greed_index < 30):
                        should_halt_trading, halt_reason = True, "Bearish market conditions"
                    if should_halt_trading:
                        state.MARKET_STATE_OK = False
                        await r2_service_global.save_system_logs_async({"market_halt": True, "reason": halt_reason})
                    else:
                        if not state.MARKET_STATE_OK:
                            print("✅ Market conditions improved. Resuming normal operations.")
                        state.MARKET_STATE_OK = True
                await asyncio.sleep(60)
            except Exception as error:
                print(f"❌ Error while monitoring the market: {error}")
                state.MARKET_STATE_OK = True
                await asyncio.sleep(60)
    except Exception as e:
        print(f"❌ Failed to run market monitoring: {e}")


async def process_batch_parallel(batch, ml_processor, batch_num, total_batches, preloaded_whale_data):
    """
    (Modified) Process a batch of symbols in parallel and return detailed results.
    - Uses the preloaded whale data.
    """
    try:
        batch_tasks = []
        for symbol_data in batch:
            task = asyncio.create_task(ml_processor.process_multiple_symbols_parallel([symbol_data], preloaded_whale_data))
            batch_tasks.append(task)

        batch_results_list_of_lists = await asyncio.gather(*batch_tasks, return_exceptions=True)

        successful_results = []
        low_score_results = []
        failed_results = []

        for i, result_list in enumerate(batch_results_list_of_lists):
            symbol = batch[i].get('symbol', 'unknown')
            if isinstance(result_list, Exception):
                failed_results.append({"symbol": symbol, "error": f"Task Execution Error: {str(result_list)}"})
                continue

            if result_list:
                result = result_list[0]
                if isinstance(result, dict):
                    if result.get('enhanced_final_score', 0) > 0.4:
                        successful_results.append(result)
                    else:
                        low_score_results.append(result)
                else:
                    failed_results.append({"symbol": symbol, "error": f"ML processor returned invalid type: {type(result)}"})
            else:
                failed_results.append({"symbol": symbol, "error": "ML processing returned None or empty list"})

        return {'success': successful_results, 'low_score': low_score_results, 'failures': failed_results}

    except Exception as error:
        print(f"❌ [Consumer] Error processing batch {batch_num}: {error}")
        return {'success': [], 'low_score': [], 'failures': []}


async def run_3_layer_analysis():
    """
    (Modified) Run the layered system (with whale data fetching decoupled).
    Layer 1:   data_manager - rapid screening
    Layer 1.5: fetch whale data separately (non-blocking)
    Layer 2:   MLProcessor - streaming analysis (uses Monte Carlo Phase 1)
    Layer 2.5: (new) Monte Carlo (Phase 2+3) for the top 10 coins
    Layer 3:   LLMService - the large model (uses the Phase 2.5 results)
    """

    layer1_candidates = []
    layer2_candidates = []
    all_low_score_candidates = []
    all_failed_candidates = []
    final_layer2_candidates = []
    final_opportunities = []
    preloaded_whale_data_dict = {}

    try:
        print("🎯 Starting the 3-layer system (with decoupled whale-data fetching)...")

        if not await state_manager.wait_for_initialization():
            print("❌ Services are not fully initialized")
            return None

        print("\n🔍 Layer 1: rapid screening (data_manager)...")
        layer1_candidates = await data_manager_global.layer1_rapid_screening()
        if not layer1_candidates:
            print("❌ No candidates found in Layer 1")
            return None
        print(f"✅ Selected {len(layer1_candidates)} coins for Layer 2")
        layer1_symbols = [c['symbol'] for c in layer1_candidates]

        start_whale_fetch = time.time()
        print(f"\n🐋 Layer 1.5: starting whale data fetch for {len(layer1_symbols)} coins (non-blocking)...")

        async def fetch_whale_data_task(symbols, results_dict):
            WHALE_FETCH_CONCURRENCY = 3
            semaphore = asyncio.Semaphore(WHALE_FETCH_CONCURRENCY)
            tasks = []

            async def get_data_with_semaphore(symbol):
                async with semaphore:
                    try:
                        data = await data_manager_global.get_whale_data_for_symbol(symbol)
                        if data:
                            results_dict[symbol] = data
                    except Exception as e:
                        print(f"   ❌ [Whale Fetch] Failed to fetch whale data for {symbol}: {e}")
                        results_dict[symbol] = {'data_available': False, 'error': str(e)}

            for symbol in symbols:
                tasks.append(asyncio.create_task(get_data_with_semaphore(symbol)))
            await asyncio.gather(*tasks)

        whale_fetcher_task = asyncio.create_task(fetch_whale_data_task(layer1_symbols, preloaded_whale_data_dict))
        print("   ⏳ The whale data fetch task is running in the background...")

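        # Layer 2 below is a bounded producer/consumer handshake:
        # stream_ohlcv_data() (the producer) puts OHLCV batches into a small queue
        # while ml_consumer_task drains it and scores each batch; once the producer
        # finishes, a None sentinel is enqueued and queue.join() guarantees every
        # batch was processed before the results are aggregated.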
        DATA_QUEUE_MAX_SIZE = 2
        ohlcv_data_queue = asyncio.Queue(maxsize=DATA_QUEUE_MAX_SIZE)
        ml_results_list = []
        market_context = await data_manager_global.get_market_context_async()
        ml_processor = MLProcessor(market_context, data_manager_global, learning_engine_global)
        batch_size = 15
        total_batches = (len(layer1_candidates) + batch_size - 1) // batch_size
        print(f"   🚀 Producer/consumer setup (OHLCV/ML): {total_batches} batches expected (batch size {batch_size})")

        async def ml_consumer_task(queue: asyncio.Queue, results_list: list, whale_data_store: dict):
            batch_num = 0
            while True:
                try:
                    batch_data = await queue.get()
                    if batch_data is None:
                        queue.task_done()
                        print("   🛑 [ML Consumer] Received the stop signal.")
                        break
                    batch_num += 1
                    print(f"   📬 [ML Consumer] Received OHLCV batch {batch_num}/{total_batches} ({len(batch_data)} coins)")

                    batch_results_dict = await process_batch_parallel(
                        batch_data, ml_processor, batch_num, total_batches, whale_data_store
                    )
                    results_list.append(batch_results_dict)
                    queue.task_done()
                    print(f"   ✅ [ML Consumer] Finished processing batch {batch_num}/{total_batches}")
                except Exception as e:
                    print(f"❌ [ML Consumer] Fatal error: {e}")
                    traceback.print_exc()
                    queue.task_done()

        print("   ▶️ [ML Consumer] Starting the consumer task...")
        consumer_task = asyncio.create_task(ml_consumer_task(ohlcv_data_queue, ml_results_list, preloaded_whale_data_dict))
        print("   ▶️ [OHLCV Producer] Starting the producer task (OHLCV data streaming)...")
        producer_task = asyncio.create_task(data_manager_global.stream_ohlcv_data(layer1_symbols, ohlcv_data_queue))

        await producer_task
        print("   ✅ [OHLCV Producer] Finished fetching all OHLCV data.")
        await ohlcv_data_queue.put(None)
        await ohlcv_data_queue.join()
        await consumer_task
        print("   ✅ [ML Consumer] Finished processing all batches.")

print(" ⏳ انتظار اكتمال مهمة جلب بيانات الحيتان (بحد أقصى للمهلة)...") |
|
|
WHALE_FETCH_TIMEOUT_SECONDS = 180 |
|
|
try: |
|
|
await asyncio.wait_for(whale_fetcher_task, timeout=WHALE_FETCH_TIMEOUT_SECONDS) |
|
|
end_whale_fetch = time.time() |
|
|
print(f" ✅ اكتمل جلب بيانات الحيتان في {end_whale_fetch - start_whale_fetch:.2f} ثانية. تم جلب/محاولة جلب بيانات لـ {len(preloaded_whale_data_dict)} عملة.") |
|
|
except asyncio.TimeoutError: |
|
|
end_whale_fetch = time.time() |
|
|
print(f" ⚠️ انتهت مهلة انتظار جلب بيانات الحيتان ({WHALE_FETCH_TIMEOUT_SECONDS} ثانية)! تم جلب/محاولة جلب بيانات لـ {len(preloaded_whale_data_dict)} عملة حتى الآن.") |
|
|
except Exception as whale_task_err: |
|
|
end_whale_fetch = time.time() |
|
|
print(f" ❌ حدث خطأ غير متوقع أثناء انتظار مهمة جلب الحيتان: {whale_task_err}") |
|
|
|
|
|
|
|
|
print("🔄 تجميع جميع النتائج...") |
|
|
for batch_result in ml_results_list: |
|
|
for success_item in batch_result['success']: |
|
|
symbol = success_item['symbol'] |
|
|
l1_data = next((c for c in layer1_candidates if c['symbol'] == symbol), None) |
|
|
if l1_data: |
|
|
success_item['reasons_for_candidacy'] = l1_data.get('reasons', []) |
|
|
success_item['layer1_score'] = l1_data.get('layer1_score', 0) |
|
|
if symbol in preloaded_whale_data_dict: success_item['whale_data'] = preloaded_whale_data_dict[symbol] |
|
|
elif 'whale_data' not in success_item: success_item['whale_data'] = {'data_available': False, 'reason': 'Fetch timed out or failed'} |
|
|
layer2_candidates.append(success_item) |
|
|
all_low_score_candidates.extend(batch_result['low_score']) |
|
|
all_failed_candidates.extend(batch_result['failures']) |
|
|
|
|
|
print(f"✅ اكتمل التحليل المتقدم (MC-Phase1): {len(layer2_candidates)} نجاح (عالي) | {len(all_low_score_candidates)} نجاح (منخفض) | {len(all_failed_candidates)} فشل") |
|
|
if not layer2_candidates: print("❌ لم يتم العثور على مرشحين في الطبقة 2") |
|
|
|
|
|
|
|
|
        layer2_candidates.sort(key=lambda x: x.get('enhanced_final_score', 0), reverse=True)
        target_count = min(10, len(layer2_candidates))
        final_layer2_candidates = layer2_candidates[:target_count]
        print(f"🎯 Selected {len(final_layer2_candidates)} coins for Layer 2.5 (strongest only)")

        print(f"\n🔬 Layer 2.5: running advanced analysis (GARCH+LGBM) on the top {len(final_layer2_candidates)} candidates...")
        advanced_mc_analyzer = ml_processor.monte_carlo_analyzer

        updated_candidates_for_llm = []
        for candidate in final_layer2_candidates:
            symbol = candidate.get('symbol', 'UNKNOWN')
            try:
                print(f"   🔄 [Advanced MC] Analyzing {symbol}...")

                advanced_mc_results = await advanced_mc_analyzer.generate_1h_distribution_advanced(
                    candidate.get('ohlcv')
                )

                if advanced_mc_results and advanced_mc_results.get('simulation_model') == 'Phase2_GARCH_LGBM':
                    print(f"   ✅ [Advanced MC] {symbol} - updated with the GARCH/LGBM model.")
                    candidate['monte_carlo_distribution'] = advanced_mc_results
                    candidate['monte_carlo_probability'] = advanced_mc_results.get('probability_of_gain', 0)
                    candidate['advanced_mc_run'] = True
                else:
                    print(f"   ⚠️ [Advanced MC] {symbol} - advanced analysis failed, falling back to the Phase 1 results.")
                    candidate['advanced_mc_run'] = False

                updated_candidates_for_llm.append(candidate)

            except Exception as e:
                print(f"   ❌ [Advanced MC] {symbol} - fatal error: {e}. Falling back to the Phase 1 results.")
                candidate['advanced_mc_run'] = False
                updated_candidates_for_llm.append(candidate)

        final_layer2_candidates = updated_candidates_for_llm

        await r2_service_global.save_candidates_async(final_layer2_candidates)
        print("\n🏆 Top 10 coins (after auditing) ready for Layer 3:")
        for i, candidate in enumerate(final_layer2_candidates):
            score = candidate.get('enhanced_final_score', 0)
            strategy = candidate.get('target_strategy', 'GENERIC')
            mc_dist = candidate.get('monte_carlo_distribution')
            pattern = candidate.get('pattern_analysis', {}).get('pattern_detected', 'no_pattern')
            timeframes = candidate.get('successful_timeframes', 0)
            symbol = candidate.get('symbol', 'UNKNOWN')
            print(f"   {i+1}. {symbol}: 📊 {score:.3f} | timeframes: {timeframes}/6")

            if mc_dist:
                mc_model = mc_dist.get('simulation_model', 'Phase1')
                mc_pi_90 = mc_dist.get('prediction_interval_90', [0, 0])
                mc_var = mc_dist.get('risk_metrics', {}).get('VaR_95_value', 0)
                print(f"      🎯 Monte Carlo ({mc_model}): 90% PI [{mc_pi_90[0]:.4f} - {mc_pi_90[1]:.4f}] | VaR: ${mc_var:.4f}")

            print(f"      🎯 Strategy: {strategy} | pattern: {pattern}")
            whale_data = candidate.get('whale_data')
            if whale_data and whale_data.get('data_available'):
                signal = whale_data.get('trading_signal', {})
                print(f"      🐋 Whales: {signal.get('action', 'HOLD')} (confidence: {signal.get('confidence', 0):.2f}){' ⚠️' if signal.get('critical_alert') else ''}")
            elif whale_data and whale_data.get('error'):
                print(f"      🐋 Whales: error ({whale_data.get('error')[:50]}...)")

print("\n🧠 الطبقة 3: التحليل بالنموذج الضخم (LLMService)...") |
|
|
|
|
|
for candidate in final_layer2_candidates: |
|
|
try: |
|
|
symbol = candidate['symbol']; print(f" 🤔 تحليل {symbol} بالنموذج الضخم (بيانات MC متقدمة)...") |
|
|
ohlcv_data = candidate.get('ohlcv'); |
|
|
if not ohlcv_data: print(f" ⚠️ لا توجد بيانات شموع لـ {symbol}"); continue |
|
|
candidate['raw_ohlcv'] = ohlcv_data |
|
|
timeframes_count = candidate.get('successful_timeframes', 0); total_candles = sum(len(data) for data in ohlcv_data.values()) if ohlcv_data else 0 |
|
|
if total_candles < 30: print(f" ⚠️ بيانات شموع غير كافية لـ {symbol}: {total_candles} شمعة فقط"); continue |
|
|
print(f" 📊 إرسال {symbol} للنموذج: {total_candles} شمعة في {timeframes_count} إطار زمني") |
|
|
|
|
|
|
|
|
candidate['sentiment_data'] = await data_manager_global.get_market_context_async() |
|
|
|
|
|
llm_analysis = await llm_service_global.get_trading_decision(candidate) |
|
|
|
|
|
if llm_analysis and llm_analysis.get('action') in ['BUY']: |
|
|
opportunity={'symbol': symbol, 'current_price': candidate.get('current_price', 0), 'decision': llm_analysis, 'enhanced_score': candidate.get('enhanced_final_score', 0), 'llm_confidence': llm_analysis.get('confidence_level', 0), 'strategy': llm_analysis.get('strategy', 'GENERIC'), 'analysis_timestamp': datetime.now().isoformat(), 'timeframes_count': timeframes_count, 'total_candles': total_candles} |
|
|
final_opportunities.append(opportunity) |
|
|
print(f" ✅ {symbol}: {llm_analysis.get('action')} - ثقة: {llm_analysis.get('confidence_level', 0):.2f} (ملف خروج: {llm_analysis.get('exit_profile')})") |
|
|
else: action = llm_analysis.get('action', 'NO_DECISION') if llm_analysis else 'NO_RESPONSE'; print(f" ⚠️ {symbol}: لا يوجد قرار تداول من النموذج الضخم ({action})") |
|
|
except Exception as e: print(f"❌ خطأ في تحليل النموذج الضخم لـ {candidate.get('symbol')}: {e}"); traceback.print_exc(); continue |
|
|
|
|
|
        if final_opportunities:
            final_opportunities.sort(key=lambda x: (x['llm_confidence'] + x['enhanced_score']) / 2, reverse=True)
            print(f"\n🏆 The layered system completed: {len(final_opportunities)} trading opportunities")
            for i, opportunity in enumerate(final_opportunities[:5]):
                print(f"   {i+1}. {opportunity['symbol']}: {opportunity['decision'].get('action')} - confidence: {opportunity['llm_confidence']:.2f} - timeframes: {opportunity['timeframes_count']}")

        try:
            top_10_detailed_summary = []
            for c in final_layer2_candidates:
                whale_summary = "Not Available"
                whale_data = c.get('whale_data')
                if whale_data and whale_data.get('data_available'):
                    signal = whale_data.get('trading_signal', {})
                    action = signal.get('action', 'HOLD')
                    confidence = signal.get('confidence', 0)
                    reason_preview = signal.get('reason', 'N/A')[:75] + "..." if signal.get('reason') else 'N/A'
                    whale_summary = f"Action: {action}, Conf: {confidence:.2f}, Alert: {signal.get('critical_alert', False)}, Reason: {reason_preview}"
                elif whale_data and whale_data.get('error'):
                    whale_summary = f"Error: {whale_data['error'][:50]}..."

                mc_summary = "N/A"
                mc_dist = c.get('monte_carlo_distribution')
                if mc_dist:
                    mc_model = mc_dist.get('simulation_model', 'Unknown')
                    if mc_model == 'Phase2_GARCH_LGBM':
                        drift = mc_dist.get('forecasted_drift_lgbm', 0)
                        vol = mc_dist.get('forecasted_vol_garch', 0)
                        mc_summary = f"Phase2_GARCH(vol={vol:.5f})_LGBM(drift={drift:.5f})"
                    else:
                        var_val = mc_dist.get('risk_metrics', {}).get('VaR_95_value', 0)
                        mc_summary = f"{mc_model}_VaR({var_val:.4f})"

                top_10_detailed_summary.append({
                    "symbol": c.get('symbol'),
                    "score": c.get('enhanced_final_score', 0),
                    "timeframes": f"{c.get('successful_timeframes', 'N/A')}/6",
                    "whale_data_summary": whale_summary,
                    "strategy": c.get('target_strategy', 'N/A'),
                    "pattern": c.get('pattern_analysis', {}).get('pattern_detected', 'N/A'),
                    "mc_analysis_level": mc_summary
                })

            other_successful_candidates = layer2_candidates[target_count:]
            other_success_summary = [
                {
                    "symbol": c['symbol'],
                    "score": c.get('enhanced_final_score', 0),
                    "timeframes": f"{c.get('successful_timeframes', 'N/A')}/6",
                    "whale_data": "Available" if c.get('whale_data', {}).get('data_available') else ("Error" if c.get('whale_data', {}).get('error') else "Not Available")
                }
                for c in other_successful_candidates
            ]
            low_score_summary = [
                {
                    "symbol": c['symbol'],
                    "score": c.get('enhanced_final_score', 0),
                    "timeframes": f"{c.get('successful_timeframes', 'N/A')}/6",
                    "whale_data": "Available" if c.get('whale_data', {}).get('data_available') else ("Error" if c.get('whale_data', {}).get('error') else "Not Available")
                }
                for c in all_low_score_candidates
            ]
            audit_data = {
                "timestamp": datetime.now().isoformat(),
                "total_layer1_candidates": len(layer1_candidates),
                "total_processed_in_layer2": len(layer2_candidates) + len(all_low_score_candidates) + len(all_failed_candidates),
                "counts": {
                    "sent_to_llm": len(final_layer2_candidates),
                    "success_not_top_10": len(other_successful_candidates),
                    "success_low_score": len(all_low_score_candidates),
                    "failures": len(all_failed_candidates)
                },
                "top_candidates_for_llm": top_10_detailed_summary,
                "other_successful_candidates": other_success_summary,
                "low_score_candidates": low_score_summary,
                "failed_candidates": all_failed_candidates,
            }
            await r2_service_global.save_analysis_audit_log_async(audit_data)
            print("✅ Analysis audit log saved to R2 (with advanced MC details).")
        except Exception as audit_error:
            print(f"❌ Failed to save the analysis audit log: {audit_error}")
            traceback.print_exc()

        if not final_opportunities:
            print("❌ No suitable trading opportunities found")
            return None
        return final_opportunities[0] if final_opportunities else None

    except Exception as error:
        print(f"❌ Fatal error in the layered system: {error}")
        traceback.print_exc()
        try:
            audit_data = {
                "timestamp": datetime.now().isoformat(),
                "status": "FAILED",
                "error": str(error),
                "traceback": traceback.format_exc(),
                "total_layer1_candidates": len(layer1_candidates),
                "counts": {
                    "sent_to_llm": 0,
                    "success_not_top_10": 0,
                    "success_low_score": len(all_low_score_candidates),
                    "failures": len(all_failed_candidates)
                },
                "failed_candidates": all_failed_candidates
            }
            await r2_service_global.save_analysis_audit_log_async(audit_data)
            print("⚠️ Saved a partial audit log after the failure.")
        except Exception as audit_fail_error:
            print(f"❌ Failed to save the audit log while handling another error: {audit_fail_error}")
        return None


async def re_analyze_open_trade_async(trade_data):
    """Re-analyze an open trade."""
    symbol = trade_data.get('symbol')
    try:
        async with state_manager.trade_analysis_lock:
            print(f"🔄 [Re-Analyze] Starting strategic analysis for {symbol}...")
            market_context = await data_manager_global.get_market_context_async()
            ohlcv_data_list = []
            temp_queue = asyncio.Queue()
            await data_manager_global.stream_ohlcv_data([symbol], temp_queue)
            while True:
                try:
                    batch = await asyncio.wait_for(temp_queue.get(), timeout=1.0)
                    if batch is None:
                        temp_queue.task_done()
                        break
                    ohlcv_data_list.extend(batch)
                    temp_queue.task_done()
                except asyncio.TimeoutError:
                    if temp_queue.empty():
                        break
                except Exception as q_err:
                    print(f"Error draining queue for re-analysis: {q_err}")
                    break

            if not ohlcv_data_list:
                print(f"⚠️ Failed to fetch re-analysis data for {symbol}")
                return None
            ohlcv_data = ohlcv_data_list[0]

            l1_data = await data_manager_global._get_detailed_symbol_data(symbol)
            if l1_data:
                ohlcv_data.update(l1_data)
                ohlcv_data['reasons_for_candidacy'] = ['re-analysis']

            re_analysis_whale_data = await data_manager_global.get_whale_data_for_symbol(symbol)

            ml_processor = MLProcessor(market_context, data_manager_global, learning_engine_global)

            print(f"🔄 [Re-Analyze] Using Monte Carlo (Phase 2+3) for {symbol}...")
            advanced_mc_results = await ml_processor.monte_carlo_analyzer.generate_1h_distribution_advanced(
                ohlcv_data.get('ohlcv')
            )

            processed_data = await ml_processor.process_and_score_symbol_enhanced(ohlcv_data, {symbol: re_analysis_whale_data} if re_analysis_whale_data else {})

            if not processed_data:
                return None

            if advanced_mc_results:
                processed_data['monte_carlo_distribution'] = advanced_mc_results
                processed_data['monte_carlo_probability'] = advanced_mc_results.get('probability_of_gain', 0)

            processed_data['raw_ohlcv'] = ohlcv_data.get('raw_ohlcv') or ohlcv_data.get('ohlcv')
            processed_data['ohlcv'] = processed_data['raw_ohlcv']

            processed_data['sentiment_data'] = market_context

            re_analysis_decision = await llm_service_global.re_analyze_trade_async(trade_data, processed_data)

            if re_analysis_decision:
                await r2_service_global.save_system_logs_async({
                    "trade_reanalyzed": True,
                    "symbol": symbol,
                    "action": re_analysis_decision.get('action'),
                    'strategy': re_analysis_decision.get('strategy', 'GENERIC')
                })
                print(f"✅ [Re-Analyze] Strategic analysis completed for {symbol}. Decision: {re_analysis_decision.get('action')}")
                return {"symbol": symbol, "decision": re_analysis_decision, "current_price": processed_data.get('current_price')}
            else:
                return None
    except Exception as error:
        await r2_service_global.save_system_logs_async({"reanalysis_error": True, "symbol": symbol, "error": str(error)})
        print(f"❌ Error in re_analyze_open_trade_async for {symbol}: {error}")
        traceback.print_exc()
        return None


async def run_bot_cycle_async():
    """Main trading cycle."""
    try:
        if not await state_manager.wait_for_initialization():
            print("❌ Services are not fully initialized - skipping the cycle")
            return
        print("🔄 Starting trading cycle...")
        await r2_service_global.save_system_logs_async({"cycle_started": True})
        if not r2_service_global.acquire_lock():
            print("❌ Failed to acquire the lock - skipping the cycle")
            return

        open_trades = []
        try:
            open_trades = await trade_manager_global.get_open_trades()
            print(f"📋 Open trades: {len(open_trades)}")
            should_look_for_new_trade = len(open_trades) == 0
            if open_trades:
                now = datetime.now()
                trades_to_reanalyze = [t for t in open_trades if now >= datetime.fromisoformat(t.get('expected_target_time', now.isoformat()))]
                if trades_to_reanalyze:
                    print(f"🔄 Re-analyzing {len(trades_to_reanalyze)} trades (using advanced MC)")

                    reanalysis_results = await asyncio.gather(*[re_analyze_open_trade_async(trade) for trade in trades_to_reanalyze], return_exceptions=True)
                    for i, result in enumerate(reanalysis_results):
                        trade = trades_to_reanalyze[i]
                        if isinstance(result, Exception):
                            print(f"   ❌ Re-analysis of {trade.get('symbol')} failed: {result}")
                        elif result and result['decision'].get('action') == "CLOSE_TRADE":
                            print(f"   ✅ Closing {trade.get('symbol')} based on the re-analysis.")
                            await trade_manager_global.close_trade(trade, result['current_price'], 'CLOSED_BY_REANALYSIS')
                        elif result and result['decision'].get('action') == "UPDATE_TRADE":
                            print(f"   ✅ Updating {trade.get('symbol')} based on the re-analysis.")
                            await trade_manager_global.update_trade(trade, result['decision'])
                        elif result:
                            print(f"   ℹ️ Keeping {trade.get('symbol')} based on the re-analysis.")
                        else:
                            print(f"   ⚠️ Re-analysis of {trade.get('symbol')} produced no decision.")

                current_open_trades_count = len(await trade_manager_global.get_open_trades())
                should_look_for_new_trade = current_open_trades_count == 0

            if should_look_for_new_trade:
                portfolio_state = await r2_service_global.get_portfolio_state_async()
                current_capital = portfolio_state.get("current_capital_usd", 0)
                if current_capital > 1:
                    print("🎯 Looking for new trading opportunities (two-phase MC system)...")
                    best_opportunity = await run_3_layer_analysis()
                    if best_opportunity:
                        print(f"✅ Opening a new trade: {best_opportunity['symbol']}")
                        await trade_manager_global.open_trade(best_opportunity['symbol'], best_opportunity['decision'], best_opportunity['current_price'])
                    else:
                        print("❌ No suitable trading opportunities found")
                else:
                    print("❌ Insufficient capital to open new trades")
            else:
                print("ℹ️ A trade is already open, skipping the search for a new one.")
        finally:
            if r2_service_global.lock_acquired:
                r2_service_global.release_lock()
            await r2_service_global.save_system_logs_async({"cycle_completed": True, "open_trades": len(open_trades)})
            print("✅ Trading cycle completed")

    except Exception as error:
        print(f"❌ Unhandled error in main cycle: {error}")
        traceback.print_exc()
        await r2_service_global.save_system_logs_async({"cycle_error": True, "error": str(error)})
        if r2_service_global and r2_service_global.lock_acquired:
            r2_service_global.release_lock()

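# Application lifecycle: lifespan() runs initialize_services() once at startup,
# then launches monitor_market_async() and the TradeManager's tactical monitor
# as background tasks; cleanup_on_shutdown() runs when the application exits.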
@asynccontextmanager
async def lifespan(application: FastAPI):
    """Manage the application lifecycle."""
    print("🚀 Starting application initialization...")
    try:
        success = await initialize_services()
        if not success:
            print("❌ Application initialization failed - shutting down...")
            yield
            return
        asyncio.create_task(monitor_market_async())

        asyncio.create_task(trade_manager_global.start_trade_monitoring())
        await r2_service_global.save_system_logs_async({"application_started": True})
        print("🎯 Application ready - the 3-layer system (two-phase MC) is active")
        print("   -> 📈 The tactical monitor (Dynamic Exit) is now active")
        yield
    except Exception as error:
        print(f"❌ Application startup failed: {error}")
        traceback.print_exc()
        if r2_service_global:
            await r2_service_global.save_system_logs_async({"application_startup_failed": True, "error": str(error)})
        raise
    finally:
        await cleanup_on_shutdown()


application = FastAPI(
    lifespan=lifespan,
    title="AI Trading Bot",
    description="Intelligent trading system with two-phase Monte Carlo analysis (GARCH+LGBM) and dynamic exit management",
    version="3.4.0"
)


@application.get("/")
async def root():
    return {
        "message": "Welcome to the intelligent trading system",
        "system": "3-Layer Analysis System (Dynamic Exit Management)",
        "status": "running" if state_manager.initialization_complete else "initializing",
        "timestamp": datetime.now().isoformat()
    }


@application.get("/run-cycle")
async def run_cycle_api():
    if not state_manager.initialization_complete:
        raise HTTPException(status_code=503, detail="Services are not fully initialized")
    asyncio.create_task(run_bot_cycle_async())
    return {"message": "Bot cycle initiated (Dynamic Exit Management)", "system": "3-Layer Analysis"}


@application.get("/health")
async def health_check():
    return {
        "status": "healthy" if state_manager.initialization_complete else "initializing",
        "initialization_complete": state_manager.initialization_complete,
        "services_initialized": state_manager.services_initialized,
        "initialization_error": state_manager.initialization_error,
        "timestamp": datetime.now().isoformat(),
        "system_architecture": "3-Layer Analysis System (Dynamic Exit Management)",
        "layers": {
            "layer1": "Data Manager - Rapid Screening",
            "layer1.5": "Whale Data Fetcher (Async)",
            "layer2": "ML Processor (MC-Phase1 Filter)",
            "layer2.5": "Advanced MC (GARCH+LGBM) for Top 10",
            "layer3": "LLM Service - Strategic Decision + Exit Profile",
            "TacticalLayer": "TradeManager - Dynamic Exit Monitor (1-min)"
        }
    }


@application.get("/analyze-market")
async def analyze_market_api():
    if not state_manager.initialization_complete:
        raise HTTPException(status_code=503, detail="Services are not fully initialized")
    result = await run_3_layer_analysis()
    if result:
        return {
            "opportunity_found": True,
            "symbol": result['symbol'],
            "action": result['decision'].get('action'),
            "confidence": result['llm_confidence'],
            "strategy": result['strategy'],
            "exit_profile": result['decision'].get('exit_profile')
        }
    else:
        return {"opportunity_found": False, "message": "No suitable opportunities found"}


@application.get("/portfolio")
async def get_portfolio_api():
    if not state_manager.initialization_complete:
        raise HTTPException(status_code=503, detail="Services are not fully initialized")
    try:
        portfolio_state = await r2_service_global.get_portfolio_state_async()
        open_trades = await trade_manager_global.get_open_trades()
        return {"portfolio": portfolio_state, "open_trades": open_trades, "timestamp": datetime.now().isoformat()}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error fetching portfolio data: {str(e)}")


@application.get("/system-status")
async def get_system_status():
    monitoring_status = trade_manager_global.get_monitoring_status() if trade_manager_global else {}
    return {
        "initialization_complete": state_manager.initialization_complete,
        "services_initialized": state_manager.services_initialized,
        "initialization_error": state_manager.initialization_error,
        "market_state_ok": state.MARKET_STATE_OK,
        "monitoring_status": monitoring_status,
        "timestamp": datetime.now().isoformat()
    }


async def cleanup_on_shutdown():
    global r2_service_global, data_manager_global, trade_manager_global, learning_engine_global
    print("🛑 Shutdown signal received. Cleaning up...")
    if trade_manager_global:
        trade_manager_global.stop_monitoring()
        print("✅ Trade monitoring stopped")
    if learning_engine_global and learning_engine_global.initialized:
        try:
            await learning_engine_global.save_weights_to_r2()
            await learning_engine_global.save_performance_history()
            await learning_engine_global.save_exit_profile_effectiveness()
            print("✅ Learning engine data saved")
        except Exception as e:
            print(f"❌ Failed to save learning engine data: {e}")
    if data_manager_global:
        await data_manager_global.close()
        print("✅ Data manager closed")
    if r2_service_global:
        try:
            await r2_service_global.save_system_logs_async({"application_shutdown": True})
            print("✅ Shutdown log saved")
        except Exception as e:
            print(f"❌ Failed to save shutdown log: {e}")
        if r2_service_global.lock_acquired:
            r2_service_global.release_lock()
            print("✅ R2 lock released")


def signal_handler(signum, frame):
    print(f"🛑 Received signal {signum}. Initiating shutdown...")
    asyncio.create_task(cleanup_on_shutdown())
    sys.exit(0)


signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)


if __name__ == "__main__":
    print("🚀 Starting AI Trading Bot with 3-Layer Analysis System (Dynamic Exit Management)...")
    uvicorn.run(application, host="0.0.0.0", port=7860, log_level="info", access_log=True)
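
# Example usage (a minimal sketch, assuming the service is reachable on
# localhost:7860 as configured above; adjust host/port for your deployment):
#
#   curl http://localhost:7860/health          # initialization status per service
#   curl http://localhost:7860/run-cycle       # kick off one trading cycle in the background
#   curl http://localhost:7860/analyze-market  # run the 3-layer analysis and return the best opportunity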