AI-Image-Generation\Offline\Stable-Deffusion-Model 🚀 #242

Closed
2 changes: 2 additions & 0 deletions .gitignore
@@ -7,6 +7,8 @@ yarn-error.log*
pnpm-debug.log*
lerna-debug.log*

backend/app/models/image-generation/*

node_modules
dist
dist-ssr
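The new ignore rule means the Stable Diffusion weights are expected to live locally under backend/app/models/image-generation rather than in the repository. A minimal sketch of how that directory could be populated, assuming a standard Hugging Face checkpoint such as runwayml/stable-diffusion-v1-5 (the PR does not name the checkpoint, so this is an assumption):

# Sketch: download a Stable Diffusion checkpoint into the gitignored models directory.
# The checkpoint id is an assumption; the PR does not state which weights are used.
from diffusers import StableDiffusionPipeline
import torch

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed checkpoint
    torch_dtype=torch.float32,
)
pipe.save_pretrained("backend/app/models/image-generation")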
89 changes: 87 additions & 2 deletions backend/app/routes/images.py
@@ -1,9 +1,19 @@
import os
import shutil
import asyncio
import time
import logging
from fastapi import APIRouter, HTTPException, Query
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
from transformers import pipeline
from diffusers import StableDiffusionPipeline, DiffusionPipeline, LCMScheduler
import torch
import matplotlib.pyplot as plt
from io import BytesIO
import base64
from PIL import Image

# hello
from app.config.settings import IMAGES_PATH
@@ -19,6 +29,23 @@
extract_metadata,
)

from transformers import BitsAndBytesConfig
import numpy as np
import warnings


router = APIRouter()


@@ -32,6 +59,64 @@ async def run_get_classes(img_path):
detect_faces(img_path)



# Local Stable Diffusion weights (this directory is excluded from git via .gitignore)
model_path = os.path.abspath("./app/models/image-generation")

# Check if GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")


# Load the Stable Diffusion pipeline
pipe = StableDiffusionPipeline.from_pretrained(
model_path,
torch_dtype=torch.float32,
)

pipe.to(device)

# Attention slicing trades a little speed for lower peak memory usage
pipe.enable_attention_slicing()


# Route to generate an image
@router.post("/generate-image")
async def generate_image(prompt: str = Query(..., description="Prompt for image generation")):
"""
Generate an image using the Stable Diffusion model.
    Example: POST http://localhost:8000/generate-image?prompt=Astronaut%20in%20a%20jungle
"""
try:
print("Request received with prompt:", prompt)


image = pipe(prompt, num_inference_steps=5).images[0]

buffer = BytesIO()
image.save(buffer, format="PNG")
buffer.seek(0)

# Convert image to Base64
image_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8")

del image

if device == "cuda":
torch.cuda.empty_cache()
# Return JSON response
return JSONResponse(content={
"prompt": prompt,
"image": image_base64
})
except Exception as e:
print(f"Error generating image: {e}")
raise HTTPException(status_code=500, detail=f"Image generation failed: {str(e)}")


@router.get("/all-images")
def get_images():
try:
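For reference, a minimal client-side sketch of exercising the new endpoint, assuming the backend is reachable at http://localhost:8000 with no router prefix (as the docstring example suggests) and that the requests library is available; only the /generate-image route and its base64-encoded PNG response come from the diff above:

# Sketch: call the /generate-image route and save the returned base64 PNG to disk.
# Host, port, and the `requests` dependency are assumptions, not part of the PR.
import base64
import requests

response = requests.post(
    "http://localhost:8000/generate-image",
    params={"prompt": "Astronaut in a jungle"},  # prompt is sent as a query parameter
    timeout=600,  # CPU-only generation with the local pipeline can be slow
)
response.raise_for_status()

payload = response.json()
with open("generated.png", "wb") as f:
    f.write(base64.b64decode(payload["image"]))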
57 changes: 0 additions & 57 deletions backend/main.py

This file was deleted.
