Upload 3 files
- Dockerfile +20 -0
- app.py +36 -0
- requirements.txt +7 -0
Dockerfile
ADDED
@@ -0,0 +1,20 @@
+FROM python:3.9
+
+WORKDIR /app
+
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+
+RUN useradd -m -u 1000 user
+
+USER user
+
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+COPY --chown=user:user . $HOME/app
+
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py
ADDED
@@ -0,0 +1,36 @@
+from fastapi import FastAPI
+from fastapi.responses import JSONResponse
+from diffusers import StableDiffusionXLPipeline
+import torch
+import base64
+from io import BytesIO
+
+# Create the FastAPI instance
+app = FastAPI()
+
+# Load the pre-trained Stable Diffusion XL model
+pipe = StableDiffusionXLPipeline.from_pretrained(
+    "segmind/SSD-1B",
+    torch_dtype=torch.float16,
+    use_safetensors=True,
+    variant="fp16"
+)
+pipe.to("cuda")
+# pipe.enable_xformers_memory_efficient_attention() # for torch < 2.0
+
+
+@app.get("/")
+def home():
+    return {"message": "Stable Diffusion API running"}
+
+# Define endpoint to generate an image from a text prompt
+@app.get("/generate-image/")
+def generate_image(prompt: str, negative_prompt: str = None):
+    image = pipe(prompt=prompt, negative_prompt=negative_prompt).images[0]
+
+    # Convert to base64 string
+    buffered = BytesIO()
+    image.save(buffered, format="PNG")
+    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+    return JSONResponse(content={"image_base64": img_str})
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+fastapi[standard]
+uvicorn[standard]
+torch
+transformers
+accelerate
+safetensors
+diffusers
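
For reference (not part of this upload), here is a minimal client sketch showing how the /generate-image/ endpoint defined in app.py could be called and its base64 response decoded back into a PNG. The base URL below is a placeholder and must be replaced with the actual Space URL.

import base64
import requests  # third-party HTTP client, assumed installed on the caller's side

# Placeholder URL; replace with the real Space endpoint.
BASE_URL = "http://localhost:7860"

resp = requests.get(
    f"{BASE_URL}/generate-image/",
    params={
        "prompt": "a photo of an astronaut riding a horse",
        "negative_prompt": "blurry, low quality",
    },
    timeout=600,  # SSD-1B generation (and a first-time model download) can take a while
)
resp.raise_for_status()

# The API returns {"image_base64": "..."}; decode it and write the PNG to disk.
with open("output.png", "wb") as f:
    f.write(base64.b64decode(resp.json()["image_base64"]))

Note that app.py moves the pipeline to CUDA, so the endpoint will only respond successfully when the Space is running on GPU hardware.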