abrahamdw882 committed on
Commit
c6c29f9
·
verified ·
1 Parent(s): 4b064b1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -7
app.py CHANGED
@@ -1,44 +1,86 @@
# app.py — previous revision: API skeleton without video file serving.
from fastapi import FastAPI
from pydantic import BaseModel
from huggingface_hub import hf_hub_download
import uvicorn

app = FastAPI(title="WAN2 GGUF API", version="1.0")

# Download model file at startup
MODEL_REPO = "calcuis/wan2-gguf"
MODEL_FILE = "wan2.2-animate-14b-q4_0.gguf"  # pick one from repo

model_path = hf_hub_download(
    repo_id=MODEL_REPO,
    filename=MODEL_FILE,
    local_dir="models",
)
print("✅ Model downloaded to:", model_path)


# Request schema
class PromptRequest(BaseModel):
    prompt: str
    steps: int = 20


@app.get("/")
def root():
    return {"message": "WAN2 GGUF API is running!"}


@app.post("/generate")
def generate_video(request: PromptRequest):
    # Dummy response for now
    return {
        "status": "success",
        "model_file": MODEL_FILE,
        "model_path": model_path,
        "prompt": request.prompt,
        "steps": request.steps,
        "note": "Replace this with actual inference code.",
    }


# ✅ Ensure server stays alive in Hugging Face Spaces
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
 
# app.py
import os
import shutil
import uuid

import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from huggingface_hub import hf_hub_download
from pydantic import BaseModel

app = FastAPI(title="WAN2 GGUF API", version="1.0")

# ✅ Directories
MODEL_REPO = "calcuis/wan2-gguf"
MODEL_FILE = "wan2.2-animate-14b-q4_0.gguf"  # big model
MODEL_DIR = "models"
OUTPUT_DIR = "outputs"

# Make sure both working directories exist before anything touches them.
for _directory in (MODEL_DIR, OUTPUT_DIR):
    os.makedirs(_directory, exist_ok=True)

# ✅ Download model file at startup
model_path = hf_hub_download(
    repo_id=MODEL_REPO,
    filename=MODEL_FILE,
    local_dir=MODEL_DIR,
)
print("✅ Model downloaded to:", model_path)
29
 
30
+
31
# Request schema
class PromptRequest(BaseModel):
    """Body schema for POST /generate."""

    prompt: str      # text prompt describing the video to generate
    steps: int = 20  # number of inference steps (defaults to 20)
35
 
36
+
37
@app.get("/")
def root():
    """Liveness check: confirms the API process is up."""
    status_payload = {"message": "WAN2 GGUF API is running!"}
    return status_payload
40
 
41
+
42
@app.post("/generate")
def generate_video(request: PromptRequest):
    """
    Dummy video generator — for now just copies a placeholder .mp4.
    Replace this later with actual WAN2 inference code.

    Returns a JSON payload echoing the request plus a ``video_url``
    path that is served by the ``/file`` static mount.
    """
    # Unique filename so concurrent requests never collide on output.
    file_id = str(uuid.uuid4())
    file_path = os.path.join(OUTPUT_DIR, f"{file_id}.mp4")

    # Use a Hugging Face placeholder video
    placeholder_url = (
        "https://huggingface.co/datasets/huggingface/documentation-images/"
        "resolve/main/video-placeholder.mp4"
    )

    # Download placeholder only once; later requests reuse the cached copy.
    placeholder_file = os.path.join(OUTPUT_DIR, "placeholder.mp4")
    if not os.path.exists(placeholder_file):
        import requests  # local import: only needed on the first request

        # Stream to a temp name, then atomically rename, so a concurrent
        # request never observes a half-written placeholder file.
        tmp_path = f"{placeholder_file}.{file_id}.part"
        # timeout prevents a hung connection from blocking the worker forever;
        # raise_for_status stops us from saving an HTML error page as ".mp4".
        with requests.get(placeholder_url, stream=True, timeout=30) as r:
            r.raise_for_status()
            with open(tmp_path, "wb") as f:
                # iter_content (unlike r.raw) transparently decodes any
                # Content-Encoding, so the saved bytes are the real video.
                for chunk in r.iter_content(chunk_size=1 << 16):
                    f.write(chunk)
        os.replace(tmp_path, placeholder_file)

    # Copy placeholder to simulate unique output
    shutil.copy(placeholder_file, file_path)

    return {
        "status": "success",
        "model_file": MODEL_FILE,
        "prompt": request.prompt,
        "steps": request.steps,
        "video_url": f"/file/{file_id}.mp4",
        "note": "Replace this with actual inference code.",
    }
78
 
79
+
80
# ✅ Serve output videos: anything written to OUTPUT_DIR becomes
# reachable at /file/<name>, which is the path /generate returns.
app.mount("/file", StaticFiles(directory=OUTPUT_DIR), name="file")


# ✅ Run server in HF Spaces (port 7860 is the Spaces convention).
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)