Spaces:
Sleeping
Sleeping
Commit
·
31b2e82
1
Parent(s):
02d909f
README updated
Browse files- Dockerfile +17 -26
- README.md +0 -1
- requirements.txt +4 -4
Dockerfile
CHANGED
|
@@ -1,26 +1,17 @@
|
|
| 1 |
-
FROM python:3.11-slim
|
| 2 |
|
| 3 |
WORKDIR /app
|
| 4 |
|
| 5 |
-
# Install
|
|
|
|
| 6 |
RUN apt-get update && apt-get install -y \
|
| 7 |
curl \
|
| 8 |
wget \
|
| 9 |
git \
|
| 10 |
-
|
| 11 |
-
cmake \
|
| 12 |
-
pkg-config \
|
| 13 |
-
libopenblas-dev \
|
| 14 |
-
liblapack-dev \
|
| 15 |
-
libxml2-dev \
|
| 16 |
-
libxslt1-dev \
|
| 17 |
-
zlib1g-dev \
|
| 18 |
-
libjpeg-dev \
|
| 19 |
-
libpng-dev \
|
| 20 |
-
libfreetype6-dev \
|
| 21 |
&& rm -rf /var/lib/apt/lists/*
|
| 22 |
|
| 23 |
-
#
|
| 24 |
COPY requirements.txt .
|
| 25 |
RUN pip install --no-cache-dir --upgrade pip && \
|
| 26 |
pip install --no-cache-dir -r requirements.txt
|
|
@@ -28,24 +19,24 @@ RUN pip install --no-cache-dir --upgrade pip && \
|
|
| 28 |
# Download spaCy model
|
| 29 |
RUN python -m spacy download en_core_web_sm
|
| 30 |
|
| 31 |
-
# Copy
|
| 32 |
COPY . .
|
| 33 |
|
| 34 |
-
# Create directories
|
| 35 |
RUN mkdir -p uploads cache logs /data/models
|
| 36 |
|
| 37 |
-
#
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
# Environment variables for CPU-only operation
|
| 41 |
ENV LLAMA_CPP_N_GPU_LAYERS=0
|
| 42 |
-
ENV
|
| 43 |
-
ENV
|
| 44 |
-
|
|
|
|
|
|
|
| 45 |
|
| 46 |
-
#
|
| 47 |
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
| 48 |
CMD curl -f http://localhost:7860/api/v1/health || exit 1
|
| 49 |
|
| 50 |
-
#
|
| 51 |
-
CMD uvicorn app:app --host 0.0.0.0 --port 7860 --workers 1 --timeout-keep-alive 30
|
|
|
|
| 1 |
+
FROM python:3.11-slim-bookworm
|
| 2 |
|
| 3 |
WORKDIR /app
|
| 4 |
|
| 5 |
+
# Install ONLY minimal runtime dependencies (no build tools!)
|
| 6 |
+
# libgomp1 is needed by OpenMP in NumPy/SciPy; git/wget/curl for huggingface-hub
|
| 7 |
RUN apt-get update && apt-get install -y \
|
| 8 |
curl \
|
| 9 |
wget \
|
| 10 |
git \
|
| 11 |
+
libgomp1 \
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
&& rm -rf /var/lib/apt/lists/*
|
| 13 |
|
| 14 |
+
# Install Python dependencies (wheels only — no compilation!)
|
| 15 |
COPY requirements.txt .
|
| 16 |
RUN pip install --no-cache-dir --upgrade pip && \
|
| 17 |
pip install --no-cache-dir -r requirements.txt
|
|
|
|
| 19 |
# Download spaCy model
|
| 20 |
RUN python -m spacy download en_core_web_sm
|
| 21 |
|
| 22 |
+
# Copy app code
|
| 23 |
COPY . .
|
| 24 |
|
| 25 |
+
# Create directories
|
| 26 |
RUN mkdir -p uploads cache logs /data/models
|
| 27 |
|
| 28 |
+
# Environment: enforce CPU mode
|
| 29 |
+
ENV CUDA_VISIBLE_DEVICES=""
|
|
|
|
|
|
|
| 30 |
ENV LLAMA_CPP_N_GPU_LAYERS=0
|
| 31 |
+
ENV OMP_NUM_THREADS=2
|
| 32 |
+
ENV NUMEXPR_MAX_THREADS=2
|
| 33 |
+
|
| 34 |
+
# Expose port
|
| 35 |
+
EXPOSE 7860
|
| 36 |
|
| 37 |
+
# Health check
|
| 38 |
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
| 39 |
CMD curl -f http://localhost:7860/api/v1/health || exit 1
|
| 40 |
|
| 41 |
+
# Run app
|
| 42 |
+
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "1", "--timeout-keep-alive", "30"]
|
README.md
CHANGED
|
@@ -4,7 +4,6 @@ emoji: π
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: purple
|
| 6 |
sdk: docker
|
| 7 |
-
accelerator: gpu
|
| 8 |
license: mit
|
| 9 |
---
|
| 10 |
|
|
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: purple
|
| 6 |
sdk: docker
|
|
|
|
| 7 |
license: mit
|
| 8 |
---
|
| 9 |
|
requirements.txt
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
--extra-index-url https://pypi.org/simple/ https://huggingface.github.io/llama-cpp-python-wheels/
|
| 2 |
|
| 3 |
# Core Dependencies
|
| 4 |
fastapi>=0.104.1
|
|
@@ -7,8 +7,8 @@ pydantic>=2.5.0
|
|
| 7 |
pydantic-settings>=2.1.0
|
| 8 |
python-multipart>=0.0.6
|
| 9 |
|
| 10 |
-
# AI & NLP Libraries
|
| 11 |
-
torch>=2.1.0
|
| 12 |
transformers>=4.35.0
|
| 13 |
sentence-transformers>=2.2.2
|
| 14 |
tokenizers>=0.14.0
|
|
@@ -40,7 +40,7 @@ openai>=1.0.0
|
|
| 40 |
anthropic>=0.5.0
|
| 41 |
|
| 42 |
# For Hugging Face Spaces
|
| 43 |
-
llama-cpp-python==0.3.16
|
| 44 |
huggingface-hub>=0.19.0
|
| 45 |
|
| 46 |
# Text Processing Utilities
|
|
|
|
| 1 |
+
--extra-index-url https://pypi.org/simple/ https://huggingface.github.io/llama-cpp-python-wheels/ https://download.pytorch.org/whl/cpu
|
| 2 |
|
| 3 |
# Core Dependencies
|
| 4 |
fastapi>=0.104.1
|
|
|
|
| 7 |
pydantic-settings>=2.1.0
|
| 8 |
python-multipart>=0.0.6
|
| 9 |
|
| 10 |
+
# AI & NLP Libraries (CPU-only PyTorch!)
|
| 11 |
+
torch>=2.1.0+cpu
|
| 12 |
transformers>=4.35.0
|
| 13 |
sentence-transformers>=2.2.2
|
| 14 |
tokenizers>=0.14.0
|
|
|
|
| 40 |
anthropic>=0.5.0
|
| 41 |
|
| 42 |
# For Hugging Face Spaces
|
| 43 |
+
llama-cpp-python==0.3.16 # Pinned to pre-built wheel
|
| 44 |
huggingface-hub>=0.19.0
|
| 45 |
|
| 46 |
# Text Processing Utilities
|