import os import re import time import json from datetime import datetime import streamlit as st # ===================================================== # CONFIGURAZIONE BASE # ===================================================== st.set_page_config(page_title="Colegium-AI", page_icon="🤖", layout="wide") st.title("🤖 Colegium-AI - Assistant Conversationnel") # ===================================================== # GESTIONE CONVERSAZIONI # ===================================================== CONV_FILE = "conversations.json" def load_conversations(): """Carica le conversazioni salvate.""" if os.path.exists(CONV_FILE): try: with open(CONV_FILE, "r", encoding="utf-8") as f: return json.load(f) except: return {} return {} def save_conversations(convs): """Salva le conversazioni.""" with open(CONV_FILE, "w", encoding="utf-8") as f: json.dump(convs, f, ensure_ascii=False, indent=2) # ===================================================== # INIZIALIZZAZIONE SESSIONE # ===================================================== if "conversations" not in st.session_state: st.session_state.conversations = load_conversations() if "current_chat" not in st.session_state: st.session_state.current_chat = None if "messages" not in st.session_state: st.session_state.messages = [] if "model_loaded" not in st.session_state: st.session_state.model_loaded = False # ===================================================== # CARICAMENTO MODELLO LEGGERO # ===================================================== @st.cache_resource(show_spinner=False) def load_model(): """Carica un modello leggero e veloce per CPU.""" try: from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline import torch # 🎯 Modello leggero: microsoft/DialoGPT-medium (350M parametri) # Ottimo per conversazioni, veloce su CPU model_name = "microsoft/DialoGPT-medium" st.info("🔄 Caricamento del modello... (30-60 secondi)") tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForCausalLM.from_pretrained( model_name, torch_dtype=torch.float32, low_cpu_mem_usage=True ) # Configura padding token if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token device = "cuda" if torch.cuda.is_available() else "cpu" model.to(device) st.success("✅ Modello caricato con successo!") return tokenizer, model, device except Exception as e: st.error(f"❌ Errore: {e}") return None, None, None # ===================================================== # SIDEBAR - GESTIONE CONVERSAZIONI # ===================================================== with st.sidebar: st.header("💬 Conversazioni") # Nuovo chat if st.button("➕ Nuova Conversazione", use_container_width=True): chat_id = datetime.now().strftime("%Y-%m-%d %H:%M:%S") st.session_state.conversations[chat_id] = [] st.session_state.current_chat = chat_id st.session_state.messages = [] save_conversations(st.session_state.conversations) st.rerun() st.divider() # Lista conversazioni chat_keys = list(st.session_state.conversations.keys()) if chat_keys: for chat in reversed(chat_keys[-10:]): # Mostra ultime 10 col1, col2 = st.columns([4, 1]) with col1: if st.button( f"📝 {chat}", key=f"chat_{chat}", use_container_width=True, type="primary" if chat == st.session_state.current_chat else "secondary" ): st.session_state.current_chat = chat st.session_state.messages = st.session_state.conversations[chat] st.rerun() with col2: if st.button("🗑️", key=f"del_{chat}"): del st.session_state.conversations[chat] save_conversations(st.session_state.conversations) if st.session_state.current_chat == chat: st.session_state.current_chat = None st.session_state.messages = [] st.rerun() else: st.info("Nessuna conversazione.\nClicca '➕' per iniziare!") st.divider() # Informazioni st.caption("🤖 **Colegium AI**") st.caption("Creato da Pepe Musafiri") st.caption(f"💬 {len(chat_keys)} conversazioni salvate") # ===================================================== # CARICA MODELLO # ===================================================== if not st.session_state.model_loaded: with st.spinner("Inizializzazione..."): tokenizer, model, device = load_model() if model is not None: st.session_state.tokenizer = tokenizer st.session_state.model = model st.session_state.device = device st.session_state.model_loaded = True else: st.error("Impossibile caricare il modello. Ricarica la pagina.") st.stop() # ===================================================== # FUNZIONE GENERAZIONE RISPOSTA # ===================================================== def generate_response(prompt, chat_history): """Genera risposta usando il modello.""" try: tokenizer = st.session_state.tokenizer model = st.session_state.model device = st.session_state.device # Costruisci il contesto (ultimi 5 scambi) context_ids = [] for msg in chat_history[-5:]: if msg["role"] == "user": input_ids = tokenizer.encode(msg["content"] + tokenizer.eos_token, return_tensors="pt") context_ids.append(input_ids) elif msg["role"] == "assistant": response_ids = tokenizer.encode(msg["content"] + tokenizer.eos_token, return_tensors="pt") context_ids.append(response_ids) # Aggiungi nuovo input new_input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt") # Concatena tutto il contesto if context_ids: bot_input_ids = torch.cat(context_ids + [new_input_ids], dim=-1) else: bot_input_ids = new_input_ids # Limita lunghezza per CPU if bot_input_ids.shape[-1] > 512: bot_input_ids = bot_input_ids[:, -512:] bot_input_ids = bot_input_ids.to(device) # Genera risposta with torch.no_grad(): chat_history_ids = model.generate( bot_input_ids, max_new_tokens=100, do_sample=True, temperature=0.7, top_p=0.9, top_k=50, repetition_penalty=1.2, pad_token_id=tokenizer.eos_token_id, eos_token_id=tokenizer.eos_token_id, no_repeat_ngram_size=3 ) # Decodifica solo la nuova risposta response = tokenizer.decode( chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True ) # Pulizia response = response.strip() response = re.sub(r'\n{3,}', '\n\n', response) response = re.sub(r'[ ]{2,}', ' ', response) if not response: response = "Je suis désolé, je n'ai pas pu générer une réponse appropriée. Pouvez-vous reformuler votre question ?" return response except Exception as e: return f"⚠️ Erreur lors de la génération: {str(e)}" # ===================================================== # EFFETTO TYPEWRITER # ===================================================== def typewriter_effect(text, placeholder, speed=0.02): """Effetto macchina da scrivere.""" displayed = "" for char in text: displayed += char placeholder.markdown(displayed + "▌") time.sleep(speed) placeholder.markdown(displayed) # ===================================================== # AREA CHAT # ===================================================== # Mostra messaggi esistenti for msg in st.session_state.messages: with st.chat_message(msg["role"], avatar="👤" if msg["role"] == "user" else "🤖"): st.write(msg["content"]) # Input utente if prompt := st.chat_input("💬 Posez votre question ici..."): # Crea nuova conversazione se necessario if st.session_state.current_chat is None: chat_id = datetime.now().strftime("%Y-%m-%d %H:%M:%S") st.session_state.current_chat = chat_id st.session_state.conversations[chat_id] = [] # Aggiungi messaggio utente user_message = {"role": "user", "content": prompt} st.session_state.messages.append(user_message) # Mostra messaggio utente with st.chat_message("user", avatar="👤"): st.write(prompt) # Genera e mostra risposta AI with st.chat_message("assistant", avatar="🤖"): placeholder = st.empty() with st.spinner("🤔 Réflexion en cours..."): response = generate_response(prompt, st.session_state.messages) # Effetto typewriter typewriter_effect(response, placeholder, speed=0.015) # Aggiungi risposta AI assistant_message = {"role": "assistant", "content": response} st.session_state.messages.append(assistant_message) # Salva conversazione st.session_state.conversations[st.session_state.current_chat] = st.session_state.messages save_conversations(st.session_state.conversations) st.rerun() # ===================================================== # MESSAGGIO INIZIALE # ===================================================== if len(st.session_state.messages) == 0: st.info("👋 **Bienvenue sur Colegium AI !**\n\nJe suis votre assistant conversationnel créé par Pepe Musafiri.\n\nPosez-moi une question pour commencer la conversation !") # Suggerimenti st.subheader("💡 Exemples de questions:") col1, col2 = st.columns(2) with col1: if st.button("🌍 Parle-moi de l'intelligence artificielle", use_container_width=True): st.session_state.temp_prompt = "Parle-moi de l'intelligence artificielle" st.rerun() if st.button("📚 Qu'est-ce que le machine learning ?", use_container_width=True): st.session_state.temp_prompt = "Qu'est-ce que le machine learning ?" st.rerun() with col2: if st.button("💻 Comment devenir développeur ?", use_container_width=True): st.session_state.temp_prompt = "Comment devenir développeur ?" st.rerun() if st.button("🤖 Raconte-moi une blague", use_container_width=True): st.session_state.temp_prompt = "Raconte-moi une blague" st.rerun()