VetriVelRavi committed on
Commit
cc76f1c
·
verified ·
1 Parent(s): 26b1fc7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -12
app.py CHANGED
@@ -8,25 +8,23 @@ import cv2
8
 
9
  # Load ControlNet (depth)
10
  controlnet = ControlNetModel.from_pretrained(
11
- "lllyasviel/sd-controlnet-depth", torch_dtype=torch.float32
12
  )
13
 
14
- # Load SD pipeline with ControlNet
15
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
16
  "runwayml/stable-diffusion-v1-5",
17
  controlnet=controlnet,
18
- torch_dtype=torch.float32
19
- ).to("cpu")
20
-
21
- # ✅ Removed: pipe.enable_model_cpu_offload()
22
 
23
  # Depth model
24
- depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cpu")
25
  depth_processor = DPTFeatureExtractor.from_pretrained("Intel/dpt-hybrid-midas")
26
 
27
  def generate(input_image, prompt):
28
  image = input_image.convert("RGB")
29
- inputs = depth_processor(images=image, return_tensors="pt").to("cpu")
30
 
31
  with torch.no_grad():
32
  outputs = depth_model(**inputs)
@@ -38,9 +36,9 @@ def generate(input_image, prompt):
38
  result = pipe(
39
  prompt=prompt,
40
  image=depth_image,
41
- height=720,
42
- width=1280,
43
- num_inference_steps=15
44
  ).images[0]
45
 
46
  return result
@@ -54,5 +52,5 @@ gr.Interface(
54
  ],
55
  outputs=gr.Image(type="pil", label="Generated Room"),
56
  title="πŸ›‹οΈ AI Interior Designer",
57
- description="Upload your room and get a styled redesign using ControlNet (Depth).",
58
  ).launch()
 
8
 
9
  # Load ControlNet (depth)
10
  controlnet = ControlNetModel.from_pretrained(
11
+ "lllyasviel/sd-controlnet-depth", torch_dtype=torch.float16
12
  )
13
 
14
+ # Load SD pipeline with ControlNet and enable CUDA for faster generation
15
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
16
  "runwayml/stable-diffusion-v1-5",
17
  controlnet=controlnet,
18
+ torch_dtype=torch.float16
19
+ ).to("cuda")
 
 
20
 
21
  # Depth model
22
+ depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda")
23
  depth_processor = DPTFeatureExtractor.from_pretrained("Intel/dpt-hybrid-midas")
24
 
25
  def generate(input_image, prompt):
26
  image = input_image.convert("RGB")
27
+ inputs = depth_processor(images=image, return_tensors="pt").to("cuda")
28
 
29
  with torch.no_grad():
30
  outputs = depth_model(**inputs)
 
36
  result = pipe(
37
  prompt=prompt,
38
  image=depth_image,
39
+ height=512,
40
+ width=768,
41
+ num_inference_steps=5 # 🔥 Faster generation
42
  ).images[0]
43
 
44
  return result
 
52
  ],
53
  outputs=gr.Image(type="pil", label="Generated Room"),
54
  title="πŸ›‹οΈ AI Interior Designer",
55
+ description="Upload your room and get a styled redesign using ControlNet (Depth). Optimized for fast generation.",
56
  ).launch()