"""
Test rapide de comparaison des modèles pour les tâches MCP
Focus sur les tests les plus importants
"""
import sys
import os
import json
import time

# Add the script directory to the import path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

def test_model(model_path, model_name):
    """Quickly test a single model."""
    try:
        from llama_cpp import Llama

        print(f"🔄 Testing {model_name}...")

        # Initialize with lightweight parameters for a fast run
        llm = Llama(
            model_path=model_path,
            n_ctx=1024,
            n_threads=1,
            verbose=False
        )

        # Simple MCP comprehension test
        prompt = """You are an AI assistant for an RTS game controlled via MCP.
Tools: get_game_state(), move_units(unit_ids, target_x, target_y)
Command: "Show me the game state"
Reply with JSON: {"tool": "tool_name", "args": {}}"""

        start_time = time.time()
        response = llm(
            prompt,
            max_tokens=100,
            temperature=0.1,
            stop=["</s>", "<|im_end|>"]
        )
        response_time = time.time() - start_time

        response_text = response['choices'][0]['text'].strip()

        # Score the response (out of 10)
        score = 0

        # Valid JSON: +3
        try:
            json.loads(response_text)
            score += 3
        except json.JSONDecodeError:
            pass

        # Correct tool selected: +4
        if "get_game_state" in response_text:
            score += 4

        # Expected structure present: +2
        if "tool" in response_text:
            score += 2

        # Topical coherence: +1
        if "game" in response_text.lower():
            score += 1

        score = min(score, 10)

        print(f"✅ {model_name}: {score}/10 | Time: {response_time:.2f}s")
        print(f"   Response: {response_text[:100]}...")

        return {
            'name': model_name,
            'score': score,
            'time': response_time,
            'response': response_text
        }

    except Exception as e:
        print(f"❌ {model_name}: Error - {e}")
        return {
            'name': model_name,
            'score': 0,
            'time': 0,
            'error': str(e)
        }
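
# Hedged illustration of the scoring in test_model(): a hypothetical ideal
# completion such as
#   {"tool": "get_game_state", "args": {}}
# scores +3 (valid JSON) +4 (correct tool) +2 ('tool' key present)
# +1 ('game' appears in the lowercased text) = 10/10.
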
def main():
    """Quick comparative test."""
    print("🚀 QUICK MCP MODEL COMPARISON")
    print("=" * 50)

    # Models to test
    models = [
        {
            'name': 'Qwen2.5-0.5B',
            'path': 'qwen2.5-0.5b-instruct-q4_0.gguf'
        },
        {
            'name': 'Qwen3-0.6B',
            'path': 'Qwen3-0.6B-Q8_0.gguf'
        },
        {
            'name': 'Gemma-3-1B',
            'path': 'google_gemma-3-1b-it-qat-Q4_0.gguf'
        }
    ]

    results = []
    for model in models:
        if os.path.exists(model['path']):
            result = test_model(model['path'], model['name'])
            results.append(result)
        else:
            print(f"❌ File not found: {model['path']}")

    # Results
    print("\n" + "=" * 50)
    print("📊 COMPARISON RESULTS")
    print("=" * 50)

    # Ranking
    sorted_results = sorted(results, key=lambda x: x['score'], reverse=True)

    print("\n🏆 RANKING:")
    for i, result in enumerate(sorted_results, 1):
        if 'error' not in result:
            print(f"   {i}. {result['name']}: {result['score']}/10 ({result['time']:.2f}s)")
        else:
            print(f"   {i}. {result['name']}: FAILED")

    # Analysis (filter the sorted list so index 0 really is the top scorer)
    successful_results = [r for r in sorted_results if 'error' not in r and r['score'] > 0]

    if successful_results:
        best_model = successful_results[0]
        print(f"\n🎯 BEST MODEL: {best_model['name']}")
        print(f"   Score: {best_model['score']}/10")
        print(f"   Time: {best_model['time']:.2f}s")

        # Recommendations
        if best_model['score'] >= 7:
            print(f"\n✅ RECOMMENDATION: {best_model['name']} is EXCELLENT for MCP")
        elif best_model['score'] >= 5:
            print(f"\n👍 RECOMMENDATION: {best_model['name']} is GOOD for MCP")
        else:
            print(f"\n⚠️ RECOMMENDATION: {best_model['name']} is LIMITED for MCP")

        # Performance vs. file size
        print("\n⚖️ PERFORMANCE:")
        for result in successful_results:
            efficiency = result['score'] / result['time'] if result['time'] > 0 else 0
            model_path = next(m['path'] for m in models if m['name'] == result['name'])
            file_size = os.path.getsize(model_path) / (1024 * 1024)
            print(f"   {result['name']}: {efficiency:.2f} score/s | {file_size:.0f} MB")

    # Save results
    with open("quick_model_comparison.json", "w", encoding="utf-8") as f:
        json.dump({
            'results': results,
            'ranking': sorted_results,
            'best_model': successful_results[0]['name'] if successful_results else None
        }, f, indent=2, ensure_ascii=False)

    print("\n📄 Results saved to: quick_model_comparison.json")


if __name__ == "__main__":
    main()
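
# For reference, quick_model_comparison.json written by main() has this shape
# (the values shown are hypothetical):
# {
#   "results":    [{"name": "...", "score": 9, "time": 1.2, "response": "..."}, ...],
#   "ranking":    [...the same entries sorted by score, failed models last...],
#   "best_model": "..."
# }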