examples_py_content = '''# examples.py - Advanced usage examples for the client

from client import LlamaClient
import time
from typing import List, Optional


class AdvancedLlamaClient:
    """Extended client with advanced features"""

    def __init__(self, base_url: str):
        self.client = LlamaClient(base_url)
        self.conversation_history = []
    def continuous_chat(self):
        """Continuous interactive chat"""
        print("🦙 Chat with Llama 3.2 3B - Type 'exit' to quit")
        print("=" * 50)

        system_prompt = input("System prompt (optional): ").strip()
        if not system_prompt:
            system_prompt = "You are a helpful and friendly assistant."

        while True:
            try:
                message = input("\\nYou: ").strip()

                if message.lower() in ['salir', 'exit', 'quit']:
                    print("Goodbye! 👋")
                    break

                if not message:
                    continue

                print("🦙: ", end="", flush=True)
                full_response = ""

                for chunk in self.client.chat_stream(
                    message=message,
                    system_prompt=system_prompt,
                    history=self.conversation_history,
                    max_tokens=512,
                    temperature=0.7
                ):
                    if "error" in chunk:
                        print(f"Error: {chunk['error']}")
                        break

                    # Print only the newly streamed text
                    new_text = chunk['response'][len(full_response):]
                    print(new_text, end="", flush=True)
                    full_response = chunk['response']

                    if chunk.get("is_complete", False):
                        print()  # Trailing newline
                        break

                # Append the exchange to the history
                if full_response and not full_response.startswith("Error:"):
                    self.conversation_history.append([message, full_response])

                # Cap the history at the last 10 exchanges
                if len(self.conversation_history) > 10:
                    self.conversation_history = self.conversation_history[-10:]

            except KeyboardInterrupt:
                print("\\n\\nGoodbye! 👋")
                break
            except Exception as e:
                print(f"\\nUnexpected error: {e}")
    def batch_questions(self, questions: List[str], system_prompt: str = ""):
        """Process a batch of questions"""
        print(f"Processing {len(questions)} questions...")
        results = []

        for i, question in enumerate(questions, 1):
            print(f"\\nQuestion {i}/{len(questions)}: {question}")
            print("-" * 40)

            response = self.client.chat(
                message=question,
                system_prompt=system_prompt,
                max_tokens=300
            )

            if "error" in response:
                print(f"Error: {response['error']}")
                results.append({"question": question, "error": response['error']})
            else:
                print(f"Answer: {response['response']}")
                results.append({
                    "question": question,
                    "response": response['response'],
                    "queue_status": response.get('queue_status')
                })

            # Short pause between questions
            time.sleep(1)

        return results
    def compare_temperatures(self, message: str, temperatures: Optional[List[float]] = None):
        """Compare responses across different temperatures"""
        # Avoid a mutable default argument
        temperatures = temperatures or [0.3, 0.7, 1.2]

        print(f"Comparing responses for: '{message}'")
        print("=" * 60)

        results = {}

        for temp in temperatures:
            print(f"\\n🌡️ Temperature: {temp}")
            print("-" * 30)

            response = self.client.chat(
                message=message,
                temperature=temp,
                max_tokens=200
            )

            if "error" in response:
                print(f"Error: {response['error']}")
                results[temp] = {"error": response['error']}
            else:
                print(response['response'])
                results[temp] = {"response": response['response']}

        return results
    def roleplay_scenario(self, scenario: str, turns: int = 5):
        """Interactive roleplay scenario"""
        print(f"🎭 Scenario: {scenario}")
        print("=" * 50)

        system_prompt = f"Act as {scenario}. Stay in character consistently and respond immersively."
        history = []

        for turn in range(turns):
            user_input = input(f"\\nTurn {turn + 1} - You: ").strip()

            if not user_input or user_input.lower() in ('salir', 'exit'):
                break

            print("🎭: ", end="", flush=True)

            for chunk in self.client.chat_stream(
                message=user_input,
                system_prompt=system_prompt,
                history=history,
                temperature=0.8,
                max_tokens=300
            ):
                if "error" in chunk:
                    print(f"Error: {chunk['error']}")
                    break

                # Redraw the line with the accumulated response
                print(f"\\r🎭: {chunk['response']}", end="", flush=True)

                if chunk.get("is_complete", False):
                    history.append([user_input, chunk['response']])
                    print()
                    break

        return history
def academic_tutor_example():
    """Example: academic physics tutor"""
    client = LlamaClient("https://tu-usuario-llama-chat.hf.space")

    system_prompt = """You are a physics tutor specialized in the Spanish bachillerato level.
You explain concepts clearly, use everyday examples, and always check
that the student understands before moving on. You can solve problems step by step."""

    physics_questions = [
        "What is angular velocity and how does it relate to linear velocity?",
        "Explain the principle of conservation of energy with an example",
        "How does the Doppler effect work?",
        "Explain the physical difference between mass and weight"
    ]

    print("🔬 Physics Tutor - Bachillerato")
    print("=" * 40)

    for question in physics_questions:
        print(f"\\n📚 Question: {question}")
        print("-" * 50)

        full_response = ""
        for chunk in client.chat_stream(
            message=question,
            system_prompt=system_prompt,
            max_tokens=400,
            temperature=0.6
        ):
            if "error" in chunk:
                print(f"Error: {chunk['error']}")
                break

            print(f"\\r👨🏫: {chunk['response']}", end="", flush=True)
            full_response = chunk['response']

            if chunk.get("is_complete", False):
                print("\\n")
                break

        input("Press Enter for the next question...")
def programming_assistant_example():
    """Example: programming assistant"""
    client = LlamaClient("https://tu-usuario-llama-chat.hf.space")

    system_prompt = """You are a senior developer specialized in Python and Flutter.
You help students with code, debugging, and best practices. You always explain
the code line by line and suggest improvements."""

    code_questions = [
        "How do I implement a Singleton pattern in Python?",
        "Explain the difference between async/await and threading",
        "How do I handle API errors in Flutter?",
        "Best practices for structuring a Flutter project"
    ]

    print("💻 Programming Assistant")
    print("=" * 35)

    for question in code_questions:
        print(f"\\n🤔 {question}")
        print("-" * 60)

        response = client.chat(
            message=question,
            system_prompt=system_prompt,
            max_tokens=600,
            temperature=0.4  # Lower temperature for code
        )

        if "error" in response:
            print(f"❌ Error: {response['error']}")
        else:
            print(f"💡 {response['response']}")

        print("\\n" + "=" * 60)
        time.sleep(2)
def creative_writing_example():
    """Example: collaborative creative writing"""
    client = LlamaClient("https://tu-usuario-llama-chat.hf.space")

    system_prompt = """You are an expert creative writer. You help develop stories,
characters, and narratives. You can continue stories, suggest plots, and write
natural dialogue. You are imaginative but coherent."""

    print("✍️ Collaborative Creative Writing")
    print("=" * 40)

    story_start = input("Write the opening of a story (2-3 lines): ")

    current_story = story_start
    history = []

    for chapter in range(3):
        print(f"\\n📖 Chapter {chapter + 1}")
        print("-" * 30)

        prompt = f"Continue this story in a creative and interesting way:\\n\\n{current_story}"

        print("✨ Continuing the story...")
        continuation = ""

        for chunk in client.chat_stream(
            message=prompt,
            system_prompt=system_prompt,
            history=history,
            max_tokens=400,
            temperature=1.0  # High creativity
        ):
            if "error" in chunk:
                print(f"Error: {chunk['error']}")
                break

            continuation = chunk['response']
            print(f"\\r{continuation}", end="", flush=True)

            if chunk.get("is_complete", False):
                print("\\n")
                break

        current_story += "\\n\\n" + continuation
        history.append([prompt, continuation])

        # Optionally steer the story
        direction = input("\\nWant to suggest a direction for the story? (optional): ")
        if direction.strip():
            current_story += "\\n\\n[Suggested direction: " + direction + "]"

    print("\\n📚 Full story:")
    print("=" * 50)
    print(current_story)
def main():
    """Main examples menu"""
    base_url = "https://tu-usuario-llama-chat.hf.space"
    examples = {
        "1": ("Continuous Chat", lambda: AdvancedLlamaClient(base_url).continuous_chat()),
        "2": ("Physics Tutor", academic_tutor_example),
        "3": ("Programming Assistant", programming_assistant_example),
        "4": ("Creative Writing", creative_writing_example),
        "5": ("Compare Temperatures", lambda: AdvancedLlamaClient(base_url).compare_temperatures(
            "Explain artificial intelligence", [0.3, 0.7, 1.2]
        )),
    }

    print("🦙 Advanced Examples - Llama 3.2 Chat")
    print("=" * 45)
    print("IMPORTANT: Replace the URL with your real Space")
    print("=" * 45)

    while True:
        print("\\nPick an example:")
        for key, (name, _) in examples.items():
            print(f"{key}. {name}")
        print("0. Exit")

        choice = input("\\nOption: ").strip()

        if choice == "0":
            print("Goodbye! 🦙")
            break
        elif choice in examples:
            try:
                print(f"\\n🚀 Running: {examples[choice][0]}")
                print("=" * 50)
                examples[choice][1]()
            except Exception as e:
                print(f"Error running example: {e}")
        else:
            print("Invalid option")


if __name__ == "__main__":
    main()
'''
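
# The examples above assume the interface that client.py exposes. As a hedged
# reference (a sketch of the assumed contract, not the actual client.py shipped
# with the project), the calls used are roughly:
#
#   client = LlamaClient(base_url)
#   client.chat(message, system_prompt="", history=None, max_tokens=..., temperature=...)
#       -> dict with "response" (plus "queue_status"), or an "error" key on failure
#   for chunk in client.chat_stream(...same kwargs...):
#       -> dicts with an accumulating "response" and a final "is_complete": True
#   client.get_queue_status()
#       -> dict with "queue_size" and "is_processing"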
tests_py_content = '''# tests.py - Tests to verify the Space works

from client import LlamaClient
import time
def test_basic_functionality(base_url: str):
    """Basic functionality tests"""
    print("🧪 Running basic tests...")
    client = LlamaClient(base_url)

    tests = []

    # Test 1: simple chat
    print("\\n1. Simple chat test...")
    try:
        response = client.chat(
            message="Hi, can you introduce yourself in one line?",
            max_tokens=50
        )

        if "error" in response:
            tests.append(("Simple chat", False, response['error']))
        else:
            tests.append(("Simple chat", True, f"Response: {response['response'][:50]}..."))
    except Exception as e:
        tests.append(("Simple chat", False, str(e)))

    # Test 2: chat with a system prompt
    print("2. System prompt test...")
    try:
        response = client.chat(
            message="What is 2+2?",
            system_prompt="You are a calculator. Reply with numbers only.",
            max_tokens=20
        )

        success = "error" not in response and "4" in response.get('response', '')
        tests.append(("System prompt", success, response.get('response', 'No response')))
    except Exception as e:
        tests.append(("System prompt", False, str(e)))

    # Test 3: chat with history
    print("3. History test...")
    try:
        history = [["What's your name?", "I'm an AI assistant."]]
        response = client.chat(
            message="Do you remember your name?",
            history=history,
            max_tokens=50
        )

        success = "error" not in response
        tests.append(("History", success, response.get('response', 'Error')[:50]))
    except Exception as e:
        tests.append(("History", False, str(e)))

    # Test 4: streaming
    print("4. Streaming test...")
    try:
        chunks_received = 0
        final_response = ""
        stream_error = None

        for chunk in client.chat_stream(
            message="Count from 1 to 5",
            max_tokens=30
        ):
            chunks_received += 1
            if "error" in chunk:
                stream_error = chunk['error']
                break

            final_response = chunk['response']
            if chunk.get("is_complete", False):
                break

        if stream_error:
            tests.append(("Streaming", False, stream_error))
        else:
            success = chunks_received > 1 and bool(final_response)
            tests.append(("Streaming", success, f"{chunks_received} chunks, response: {final_response[:30]}"))
    except Exception as e:
        tests.append(("Streaming", False, str(e)))

    # Test 5: queue status
    print("5. Queue status test...")
    try:
        status = client.get_queue_status()
        success = "queue_size" in status and "is_processing" in status
        tests.append(("Queue status", success, str(status)))
    except Exception as e:
        tests.append(("Queue status", False, str(e)))

    # Print the results
    print("\\n" + "=" * 60)
    print("📊 TEST RESULTS")
    print("=" * 60)

    passed = 0
    for test_name, success, details in tests:
        status = "✅ PASS" if success else "❌ FAIL"
        print(f"{status} {test_name}: {details}")
        if success:
            passed += 1

    print(f"\\n🎯 Result: {passed}/{len(tests)} tests passed")
    return passed == len(tests)
def stress_test(base_url: str, num_requests: int = 5):
    """Stress test with multiple requests"""
    print(f"\\n⚡ Stress test ({num_requests} requests)...")
    client = LlamaClient(base_url)

    start_time = time.time()
    results = []

    for i in range(num_requests):
        print(f"Request {i+1}/{num_requests}...", end=" ")

        try:
            request_start = time.time()
            response = client.chat(
                message=f"This is request number {i+1}. Reply briefly.",
                max_tokens=50
            )
            request_time = time.time() - request_start

            if "error" in response:
                print(f"❌ Error: {response['error']}")
                results.append({"success": False, "time": request_time, "error": response['error']})
            else:
                print(f"✅ {request_time:.2f}s")
                results.append({"success": True, "time": request_time, "response_length": len(response['response'])})

        except Exception as e:
            print(f"❌ Exception: {e}")
            results.append({"success": False, "time": 0, "error": str(e)})

    total_time = time.time() - start_time

    # Analyze the results
    successful = [r for r in results if r['success']]
    failed = [r for r in results if not r['success']]

    print(f"\\n📈 Stress analysis:")
    print(f"  • Total: {total_time:.2f}s")
    print(f"  • Successful: {len(successful)}/{num_requests}")
    print(f"  • Failed: {len(failed)}/{num_requests}")

    if successful:
        avg_time = sum(r['time'] for r in successful) / len(successful)
        print(f"  • Average time: {avg_time:.2f}s")

    return len(successful) == num_requests
def performance_benchmark(base_url: str):
    """Performance benchmark"""
    print("\\n🏁 Performance benchmark...")
    client = LlamaClient(base_url)

    test_cases = [
        ("Short response", "Hello", 20),
        ("Medium response", "Explain what Python is in one paragraph", 100),
        ("Long response", "Describe the history of programming", 300),
    ]

    for test_name, message, max_tokens in test_cases:
        print(f"\\n{test_name} ({max_tokens} tokens)...")

        # Non-streaming run
        start_time = time.time()
        response = client.chat(message=message, max_tokens=max_tokens)
        normal_time = time.time() - start_time

        if "error" in response:
            print(f"  ❌ Error: {response['error']}")
            continue

        # Streaming run
        start_time = time.time()
        for chunk in client.chat_stream(message=message, max_tokens=max_tokens):
            if chunk.get("is_complete", False):
                break
        stream_time = time.time() - start_time

        print(f"  • Without streaming: {normal_time:.2f}s")
        print(f"  • With streaming: {stream_time:.2f}s")
        print(f"  • Generated: ~{len(response['response'])} chars")
def main():
    """Run all the tests"""
    base_url = input("Enter your Space URL: ").strip()

    if not base_url:
        print("❌ URL required")
        return

    if not base_url.startswith(('http://', 'https://')):
        base_url = f"https://{base_url}"

    print(f"🧪 Testing Space: {base_url}")
    print("=" * 60)

    # Run the tests
    basic_ok = test_basic_functionality(base_url)

    if basic_ok:
        print("\\n✅ Basic tests passed. Continuing...")
        stress_ok = stress_test(base_url, 3)
        performance_benchmark(base_url)

        if stress_ok:
            print("\\n🎉 All tests passed! The Space is working correctly.")
        else:
            print("\\n⚠️ Some stress tests failed. Check the performance.")
    else:
        print("\\n❌ Basic tests failed. Check the Space configuration.")


if __name__ == "__main__":
    main()
'''
with open("examples.py", "w", encoding="utf-8") as f: |
|
|
f.write(examples_py_content) |
|
|
|
|
|
with open("tests.py", "w", encoding="utf-8") as f: |
|
|
f.write(tests_py_content) |
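
# Optional sanity check (a minimal sketch, not part of the original workflow):
# verify that the generated files are at least syntactically valid Python
# before uploading them. py_compile is in the standard library.
import py_compile

for path in ("examples.py", "tests.py"):
    try:
        py_compile.compile(path, doraise=True)  # raises PyCompileError on a syntax error
        print(f"✅ {path} compiles")
    except py_compile.PyCompileError as e:
        print(f"❌ {path} has a syntax error: {e}")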
summary = """ |
|
|
🦙 RESUMEN DEL PROYECTO - Llama 3.2 3B Chat Space |
|
|
================================================================ |
|
|
|
|
|
✅ ARCHIVOS GENERADOS: |
|
|
|
|
|
📱 CORE APPLICATION: |
|
|
- app.py → Aplicación principal de Gradio con cola y streaming |
|
|
- requirements.txt → Dependencias del proyecto |
|
|
- config.py → Configuración centralizada |
|
|
- utils.py → Utilidades y monitoreo de rendimiento |
|
|
|
|
|
🐍 CLIENTE PYTHON: |
|
|
- client.py → Cliente Python para API del Space |
|
|
- examples.py → Ejemplos avanzados de uso |
|
|
- tests.py → Suite de pruebas automáticas |
|
|
|
|
|
📚 DOCUMENTACIÓN: |
|
|
- README.md → Instrucciones completas de setup y uso |
|
|
|
|
|
🚀 CARACTERÍSTICAS IMPLEMENTADAS: |
|
|
|
|
|
✓ Sistema de colas (una petición a la vez) |
|
|
✓ Streaming en tiempo real |
|
|
✓ API completa para cliente Python |
|
|
✓ Soporte para system prompt, message e history |
|
|
✓ Interfaz web con Gradio |
|
|
✓ Monitoreo de estado de cola |
|
|
✓ Manejo de errores robusto |
|
|
✓ Configuración para hf_token |
|
|
✓ Validación de parámetros |
|
|
✓ Estimación de tokens |
|
|
✓ Historial de conversación |
|
|
✓ Múltiples temperaturas |
|
|
✓ Límites configurables |
|
|
|
|
|
🔧 PASOS SIGUIENTES: |
|
|
|
|
|
1. Crear Space en Hugging Face: |
|
|
- Ve a https://huggingface.co/new-space |
|
|
- Selecciona Gradio SDK |
|
|
- Elige hardware T4 small o superior |
|
|
|
|
|
2. Configurar HF_TOKEN: |
|
|
- Settings → Repository secrets |
|
|
- Agregar HF_TOKEN con tu token de acceso |
|
|
|
|
|
3. Subir archivos: |
|
|
- app.py y requirements.txt son obligatorios |
|
|
- Los demás archivos son opcionales pero recomendados |
|
|
|
|
|
4. Probar funcionalidad: |
|
|
- Usar tests.py para verificar el funcionamiento |
|
|
- Usar examples.py para casos de uso avanzados |
|
|
|
|
|
📖 DOCUMENTACIÓN COMPLETA: |
|
|
Revisar README.md para instrucciones detalladas de configuración, |
|
|
uso de la API, troubleshooting y ejemplos de integración. |
|
|
|
|
|
¡Tu Space está listo para ser desplegado! 🚀 |
|
|
""" |
|
|
|
|
|
print(summary)

with open("RESUMEN.md", "w", encoding="utf-8") as f:
    f.write(summary)
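
# Step 3 of the summary (uploading the files) can also be scripted. This is a
# hedged sketch, not the project's own deploy flow: it assumes huggingface_hub
# is installed, HF_TOKEN is set in the environment, and SPACE_ID below is
# replaced with your real Space id.
import os

if os.environ.get("HF_TOKEN"):
    from huggingface_hub import HfApi  # pip install huggingface_hub

    SPACE_ID = "tu-usuario/llama-chat"  # hypothetical id - replace with your Space
    api = HfApi(token=os.environ["HF_TOKEN"])
    for filename in ("examples.py", "tests.py"):
        api.upload_file(
            path_or_fileobj=filename,  # local file written above
            path_in_repo=filename,     # same name inside the Space repo
            repo_id=SPACE_ID,
            repo_type="space",
        )
    print(f"Uploaded examples.py and tests.py to {SPACE_ID}")
else:
    print("HF_TOKEN not set - skipping the optional upload sketch.")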