#!/usr/bin/env bash
# entrypoint.sh — container entrypoint.
#
# Starts the Ollama server in the background, waits for its HTTP API to come
# up, pulls the required model, then exec's the Python application so it
# receives signals directly.
#
# Requires: /app/ollama binary, /app/enhanced_app.py, network access for pull.
set -euo pipefail

readonly MODEL_NAME="hf.co/kshitijthakkar/loggenix-moe-0.3B-A0.1B-e3-lr7e5-b16-4090-v6.3-finetuned-tool-Q8_0-GGUF:Q8_0"
readonly OLLAMA_URL="http://localhost:11434/"

echo "[entrypoint] Starting Ollama server in background..."
OLLAMA_HOST=0.0.0.0:11434 /app/ollama serve &
OLLAMA_PID=$!

# If we fail before exec'ing the app, don't leave the server orphaned.
cleanup() { kill "$OLLAMA_PID" 2>/dev/null || true; }
trap cleanup EXIT

# Wait until the Ollama API responds. Bail out if the server process dies,
# and cap the wait (~2 minutes) so a broken server can't hang the container
# forever — the original loop retried indefinitely.
echo "[entrypoint] Waiting for Ollama API..."
retries=0
until curl -fsS "$OLLAMA_URL" > /dev/null 2>&1; do
  if ! kill -0 "$OLLAMA_PID" 2>/dev/null; then
    echo "[entrypoint] Ollama server process exited during startup." >&2
    exit 1
  fi
  if (( retries >= 40 )); then
    echo "[entrypoint] Timed out waiting for Ollama API." >&2
    exit 1
  fi
  retries=$(( retries + 1 ))
  echo "[entrypoint] Ollama not ready... retrying in 3s"
  sleep 3
done
echo "[entrypoint] Ollama is live!"

echo "[entrypoint] Pulling model: $MODEL_NAME"
if ! /app/ollama pull "$MODEL_NAME"; then
  echo "[entrypoint] Failed to pull model. Check name and internet." >&2
  exit 1
fi

# Drop the cleanup trap: from here on the server must outlive this shell and
# keep running alongside the app. exec replaces the shell with Python, so the
# app becomes the container's foreground process.
trap - EXIT
echo "[entrypoint] Launching enhanced_app.py"
exec python /app/enhanced_app.py