hue-portal-backend / Docker / entrypoint.sh
#!/bin/bash
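# Strict mode: exit on any error (-e), error on unset variables (-u),
# and fail a pipeline when any stage fails (pipefail)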
set -euo pipefail
log() {
    echo "[ENTRYPOINT] $1"
}
log "Boot sequence started at $(date -u +"%Y-%m-%d %H:%M:%S UTC")"
# Log the installed transformers version and check that transformers.modeling_layers is importable (only present in sufficiently recent releases)
python - <<'PY'
import importlib.util, transformers
print(f"[ENTRYPOINT] transformers version: {transformers.__version__}")
spec = importlib.util.find_spec("transformers.modeling_layers")
print(f"[ENTRYPOINT] transformers.modeling_layers available: {bool(spec)}")
PY
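# DATABASE_URL, when set, is expected in standard URL form, e.g. (placeholder values):
#   postgres://user:password@host:5432/dbname
# Only its length is logged below, so the credential itself never appears in logs.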
if [[ -z "${DATABASE_URL:-}" ]]; then
    log "DATABASE_URL is empty -> Django will fall back to POSTGRES_* or SQLite"
else
    log "DATABASE_URL detected (length: ${#DATABASE_URL})"
fi
cd /app
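# Apply schema migrations before serving traffic; --noinput keeps the step non-interactive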
log "Running migrations..."
python hue_portal/manage.py migrate --noinput
log "Migrations completed."
log "Ensuring cache table exists..."
python hue_portal/manage.py createcachetable
log "Cache table ready."
log "Starting Gunicorn on port ${PORT:-7860}..."
# Preload model if LLM provider is llama_cpp (to avoid timeout on first request)
if [[ "${DEFAULT_LLM_PROVIDER:-}" == "llama_cpp" ]] || [[ "${LLM_PROVIDER:-}" == "llama_cpp" ]]; then
log "Preloading llama.cpp model to avoid first-request timeout..."
python -c "
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hue_portal.hue_portal.settings')
import django
django.setup()
from hue_portal.chatbot.llm_integration import LLMGenerator
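# Instantiating LLMGenerator should trigger the model load; gen.llama_cpp stays falsy if it did not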
try:
    gen = LLMGenerator()
    if gen.llama_cpp:
        print('[ENTRYPOINT] ✅ Model preloaded successfully')
    else:
        print('[ENTRYPOINT] ⚠️ Model not loaded (may load on first request)')
except Exception as e:
    print(f'[ENTRYPOINT] ⚠️ Model preload failed: {e} (will load on first request)')
" || log "Model preload skipped (will load on first request)"
fi
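# A single sync worker keeps one copy of the model in memory; the generous
# 600 s timeout allows for slow cold-start generations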
log "Starting Gunicorn on port ${PORT:-7860}..."
exec gunicorn hue_portal.hue_portal.wsgi:application \
    --bind "0.0.0.0:${PORT:-7860}" \
    --timeout 600 \
    --workers 1 \
    --worker-class sync