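"""Advanced usage examples for LlamaClient.

Covers continuous streaming chat, batch questions, temperature comparison,
interactive roleplay, and domain-specific assistants (physics tutor,
programming helper, creative writing).

The code below assumes `client.chat()` returns a dict containing 'response'
and 'queue_status' (or 'error'), and that `client.chat_stream()` yields dicts
with a cumulative 'response', an 'is_complete' flag, and optionally 'error'.
"""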
import time
from typing import List, Optional

from client import LlamaClient


class AdvancedLlamaClient:
    """Extended client with advanced functionality."""

    def __init__(self, base_url: str):
        self.client = LlamaClient(base_url)
        self.conversation_history = []

    def continuous_chat(self):
        """Continuous interactive chat."""
        print("🦙 Chat with Llama 3.2 3B - type 'exit' to quit")
        print("=" * 50)

        system_prompt = input("System prompt (optional): ").strip()
        if not system_prompt:
            system_prompt = "You are a helpful and friendly assistant."

        while True:
            try:
                message = input("\nYou: ").strip()

                if message.lower() in ['salir', 'exit', 'quit']:
                    print("Goodbye! 👋")
                    break

                if not message:
                    continue

                print("🦙: ", end="", flush=True)
                full_response = ""

                for chunk in self.client.chat_stream(
                    message=message,
                    system_prompt=system_prompt,
                    history=self.conversation_history,
                    max_tokens=512,
                    temperature=0.7
                ):
                    if "error" in chunk:
                        print(f"Error: {chunk['error']}")
                        break

                    # Each chunk carries the cumulative response; print only
                    # the newly generated suffix.
                    new_text = chunk['response'][len(full_response):]
                    print(new_text, end="", flush=True)
                    full_response = chunk['response']

                    if chunk.get("is_complete", False):
                        print()
                        break

                if full_response and not full_response.startswith("Error:"):
                    self.conversation_history.append([message, full_response])

                # Keep only the last 10 exchanges to bound the prompt size.
                if len(self.conversation_history) > 10:
                    self.conversation_history = self.conversation_history[-10:]

            except KeyboardInterrupt:
                print("\n\nGoodbye! 👋")
                break
            except Exception as e:
                print(f"\nUnexpected error: {e}")

    def batch_questions(self, questions: List[str], system_prompt: str = ""):
        """Process multiple questions as a batch."""
        print(f"Processing {len(questions)} questions...")
        results = []

        for i, question in enumerate(questions, 1):
            print(f"\nQuestion {i}/{len(questions)}: {question}")
            print("-" * 40)

            response = self.client.chat(
                message=question,
                system_prompt=system_prompt,
                max_tokens=300
            )

            if "error" in response:
                print(f"Error: {response['error']}")
                results.append({"question": question, "error": response['error']})
            else:
                print(f"Answer: {response['response']}")
                results.append({
                    "question": question,
                    "response": response['response'],
                    "queue_status": response['queue_status']
                })

            # Brief pause between requests to avoid flooding the queue.
            time.sleep(1)

        return results
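
    # A minimal usage sketch for batch_questions (hypothetical Space URL and
    # questions; persists results with the standard json module):
    #
    #     import json
    #     client = AdvancedLlamaClient("https://tu-usuario-llama-chat.hf.space")
    #     results = client.batch_questions(
    #         ["What is inertia?", "Define entropy"],
    #         system_prompt="You are a concise physics tutor.",
    #     )
    #     with open("batch_results.json", "w", encoding="utf-8") as f:
    #         json.dump(results, f, ensure_ascii=False, indent=2)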

    def compare_temperatures(self, message: str, temperatures: Optional[List[float]] = None):
        """Compare responses across different temperatures."""
        # Avoid a mutable default argument.
        if temperatures is None:
            temperatures = [0.3, 0.7, 1.2]

        print(f"Comparing responses for: '{message}'")
        print("=" * 60)

        results = {}

        for temp in temperatures:
            print(f"\n🌡️ Temperature: {temp}")
            print("-" * 30)

            response = self.client.chat(
                message=message,
                temperature=temp,
                max_tokens=200
            )

            if "error" in response:
                print(f"Error: {response['error']}")
                results[temp] = {"error": response['error']}
            else:
                print(response['response'])
                results[temp] = {"response": response['response']}

        return results

    def roleplay_scenario(self, scenario: str, turns: int = 5):
        """Interactive roleplay scenario."""
        print(f"🎭 Scenario: {scenario}")
        print("=" * 50)

        system_prompt = f"Act as {scenario}. Stay in character consistently and respond immersively."
        history = []

        for turn in range(turns):
            user_input = input(f"\nTurn {turn + 1} - You: ").strip()

            if not user_input or user_input.lower() in ('salir', 'exit'):
                break

            print("🎭: ", end="", flush=True)
            full_response = ""

            for chunk in self.client.chat_stream(
                message=user_input,
                system_prompt=system_prompt,
                history=history,
                temperature=0.8,
                max_tokens=300
            ):
                if "error" in chunk:
                    print(f"Error: {chunk['error']}")
                    break

                # Print only the new suffix; rewriting the line with '\r'
                # breaks as soon as the response spans multiple lines.
                print(chunk['response'][len(full_response):], end="", flush=True)
                full_response = chunk['response']

                if chunk.get("is_complete", False):
                    history.append([user_input, full_response])
                    print()
                    break

        return history
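
# roleplay_scenario is not wired into the menu in main(); a minimal usage
# sketch (the scenario text is just an illustration):
#
#     client = AdvancedLlamaClient("https://tu-usuario-llama-chat.hf.space")
#     client.roleplay_scenario("a medieval merchant in a fantasy market", turns=3)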


def academic_tutor_example():
    """Example: academic physics tutor."""
    client = LlamaClient("https://tu-usuario-llama-chat.hf.space")

    system_prompt = """You are a physics tutor specializing in the Spanish bachillerato.
You explain concepts clearly, use everyday examples, and always check that the
student understands before moving on. You can solve problems step by step."""

    physics_questions = [
        "What is angular velocity and how does it relate to linear velocity?",
        "Explain the principle of conservation of energy with an example",
        "How does the Doppler effect work?",
        "What is the physical difference between mass and weight?"
    ]

    print("🔬 Physics Tutor - Bachillerato")
    print("=" * 40)

    for question in physics_questions:
        print(f"\n📚 Question: {question}")
        print("-" * 50)

        print("👨‍🏫: ", end="", flush=True)
        full_response = ""
        for chunk in client.chat_stream(
            message=question,
            system_prompt=system_prompt,
            max_tokens=400,
            temperature=0.6
        ):
            if "error" in chunk:
                print(f"Error: {chunk['error']}")
                break

            # Print only the newly streamed suffix of the cumulative response.
            print(chunk['response'][len(full_response):], end="", flush=True)
            full_response = chunk['response']

            if chunk.get("is_complete", False):
                print("\n")
                break

        input("Press Enter for the next question...")


def programming_assistant_example():
    """Example: programming assistant."""
    client = LlamaClient("https://tu-usuario-llama-chat.hf.space")

    system_prompt = """You are a senior developer specializing in Python and Flutter.
You help students with code, debugging, and best practices. You always explain
code line by line and suggest improvements."""

    code_questions = [
        "How do I implement a Singleton pattern in Python?",
        "Explain the difference between async/await and threading",
        "How do I handle API errors in Flutter?",
        "Best practices for structuring a Flutter project"
    ]

    print("💻 Programming Assistant")
    print("=" * 35)

    for question in code_questions:
        print(f"\n🤔 {question}")
        print("-" * 60)

        response = client.chat(
            message=question,
            system_prompt=system_prompt,
            max_tokens=600,
            temperature=0.4
        )

        if "error" in response:
            print(f"❌ Error: {response['error']}")
        else:
            print(f"💡 {response['response']}")

        print("\n" + "=" * 60)
        time.sleep(2)


def creative_writing_example():
    """Example: collaborative creative writing."""
    client = LlamaClient("https://tu-usuario-llama-chat.hf.space")

    system_prompt = """You are an expert creative writer. You help develop stories,
characters, and narratives. You can continue stories, suggest plots, and write
natural dialogue. You are imaginative but coherent."""

    print("✍️ Collaborative Creative Writing")
    print("=" * 40)

    story_start = input("Write the opening of a story (2-3 lines): ")

    current_story = story_start
    history = []

    for chapter in range(3):
        print(f"\n📖 Chapter {chapter + 1}")
        print("-" * 30)

        prompt = f"Continue this story in a creative and engaging way:\n\n{current_story}"

        print("✨ Continuing the story...")
        continuation = ""

        for chunk in client.chat_stream(
            message=prompt,
            system_prompt=system_prompt,
            history=history,
            max_tokens=400,
            temperature=1.0
        ):
            if "error" in chunk:
                print(f"Error: {chunk['error']}")
                break

            # Print only the newly streamed suffix of the cumulative response.
            print(chunk['response'][len(continuation):], end="", flush=True)
            continuation = chunk['response']

            if chunk.get("is_complete", False):
                print("\n")
                break

        current_story += "\n\n" + continuation
        history.append([prompt, continuation])

        # Optionally let the user steer the next chapter.
        direction = input("\nWould you like to suggest a direction for the story? (optional): ")
        if direction.strip():
            current_story += "\n\n[Suggested direction: " + direction + "]"

    print("\n📚 Full story:")
    print("=" * 50)
    print(current_story)


def main():
    """Main example menu."""
    examples = {
        "1": ("Continuous Chat", lambda: AdvancedLlamaClient("https://tu-usuario-llama-chat.hf.space").continuous_chat()),
        "2": ("Physics Tutor", academic_tutor_example),
        "3": ("Programming Assistant", programming_assistant_example),
        "4": ("Creative Writing", creative_writing_example),
        "5": ("Compare Temperatures", lambda: AdvancedLlamaClient("https://tu-usuario-llama-chat.hf.space").compare_temperatures(
            "Explain artificial intelligence", [0.3, 0.7, 1.2]
        )),
    }

    print("🦙 Advanced Examples - Llama 3.2 Chat")
    print("=" * 45)
    print("IMPORTANT: Replace the URL with your real Space")
    print("=" * 45)

    while True:
        print("\nSelect an example:")
        for key, (name, _) in examples.items():
            print(f"{key}. {name}")
        print("0. Exit")

        choice = input("\nOption: ").strip()

        if choice == "0":
            print("Goodbye! 🦙")
            break
        elif choice in examples:
            try:
                print(f"\n🚀 Running: {examples[choice][0]}")
                print("=" * 50)
                examples[choice][1]()
            except Exception as e:
                print(f"Error running example: {e}")
        else:
            print("Invalid option")


if __name__ == "__main__":
    main()