r3gm committed · Commit 5ddf1f8 · verified · Parent(s): 1ba4588

Update app.py

Files changed (1):
  1. app.py +184 -164
app.py CHANGED
@@ -8,6 +8,7 @@ from stablepy import (
     check_scheduler_compatibility,
     TASK_AND_PREPROCESSORS,
     FACE_RESTORATION_MODELS,
+    PROMPT_WEIGHT_OPTIONS_PRIORITY,
     scheduler_names,
 )
 from constants import (
@@ -376,7 +377,7 @@ class GuiSD:
         t2i_adapter_preprocessor,
         t2i_adapter_conditioning_scale,
         t2i_adapter_conditioning_factor,
-        xformers_memory_efficient_attention,
+        enable_live_preview,
         freeu,
         generator_in_cpu,
         adetailer_inpaint_only,
@@ -544,7 +545,7 @@ class GuiSD:
             "sampler": sampler,
             "schedule_type": schedule_type,
             "schedule_prediction_type": schedule_prediction_type,
-            "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
+            "xformers_memory_efficient_attention": False,
             "gui_active": True,
             "loop_generation": loop_generation,
             "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
@@ -559,7 +560,7 @@ class GuiSD:
             "leave_progress_bar": leave_progress_bar,
             "disable_progress_bar": disable_progress_bar,
             "image_previews": image_previews,
-            "display_images": display_images,
+            "display_images": False,
             "save_generated_images": save_generated_images,
             "filename_pattern": filename_pattern,
             "image_storage_location": image_storage_location,
@@ -636,8 +637,13 @@ class GuiSD:
         if save_generated_images:
             info_images += f"<br>{download_links}"

+        if not display_images:
+            img = gr.update()
+        elif not enable_live_preview:
+            img = gr.update()
+
         info_state = "COMPLETE"

         yield info_state, img, info_images

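The hunk above leans on gr.update() with no arguments acting as a no-op for an output slot: yielding it leaves that component unchanged in the browser. A minimal, self-contained sketch of the pattern, assuming current Gradio; the generate handler, the checkbox, and the placeholder image below are illustrative and not part of app.py:

```python
import gradio as gr
import numpy as np


def generate(show_final):
    # Stream progress text; yield gr.update() to leave the image output untouched.
    for step in range(3):
        yield f"step {step + 1}/3", gr.update()
    final_img = np.zeros((64, 64, 3), dtype=np.uint8)  # placeholder result
    # Only push the finished image when the user asked to see it.
    yield "COMPLETE", (final_img if show_final else gr.update())


with gr.Blocks() as demo:
    show_final = gr.Checkbox(value=True, label="Show final results")
    status = gr.Textbox(label="Status")
    image = gr.Image(label="Result")
    gr.Button("Generate").click(generate, [show_final], [status, image])

# demo.launch()
```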
@@ -785,10 +791,14 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as

            return gr.update(value=task_name, choices=new_choices)

-        task_gui = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
-        model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
+        with gr.Accordion("Model and Task", open=True, visible=True):
+            task_gui = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
+            model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
         prompt_gui = gr.Textbox(lines=5, placeholder="Enter prompt", label="Prompt")
-        neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt", value="lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, worst quality, low quality, very displeasing, (bad)")
+
+        with gr.Accordion("Negative prompt", open=False, visible=True):
+            neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt", value="bad anatomy, ((many hands, bad hands, missing fingers)), anatomical nonsense, ugly, deformed, bad proportions, bad shadow, extra limbs, missing limbs, floating limbs, disconnected limbs, malformed hands, poorly drawn, mutation, mutated hands and fingers, extra legs, interlocked fingers, extra arms, disfigured face, long neck, asymmetrical eyes, lowres, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry, duplicate, bad composition, text, worst quality, normal quality, low quality, very displeasing, monochrome, grayscale, black and white, desaturated, low contrast, muted tones, washed out, unfinished, incomplete, draft, logo, backlighting")
+
         with gr.Row(equal_height=False):
             set_params_gui = gr.Button(value="↙️", variant="secondary", size="sm")
             clear_prompt_gui = gr.Button(value="🗑️", variant="secondary", size="sm")
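For reference, components created inside a with gr.Accordion(...) context are grouped into a collapsible section and open= controls whether it starts expanded, which is how the hunk above tucks away the model picker and the long negative-prompt default. A small hedged sketch with illustrative labels:

```python
import gradio as gr

with gr.Blocks() as demo:
    # Starts expanded; children are whatever is created inside the context manager.
    with gr.Accordion("Model and Task", open=True):
        task = gr.Dropdown(label="Task", choices=["txt2img", "img2img"])
    # Starts collapsed, keeping a long default value out of the way.
    with gr.Accordion("Negative prompt", open=False):
        neg = gr.Textbox(lines=3, label="Negative prompt")
```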
@@ -825,158 +835,166 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
         load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU")

     with gr.Column(scale=1):
-        steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=28, label="Steps")
-        cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7., label="CFG")
-        sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler")
-        schedule_type_gui = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
-        img_width_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Width")
-        img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Height")
-        seed_gui = gr.Number(minimum=-1, maximum=9999999999, value=-1, label="Seed")
-        pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
-        with gr.Row():
-            clip_skip_gui = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
-            free_u_gui = gr.Checkbox(value=False, label="FreeU")
-
-        with gr.Row(equal_height=False):
-
-            def run_set_params_gui(base_prompt, name_model):
-                valid_receptors = {  # default values
-                    "prompt": gr.update(value=base_prompt),
-                    "neg_prompt": gr.update(value=""),
-                    "Steps": gr.update(value=30),
-                    "width": gr.update(value=1024),
-                    "height": gr.update(value=1024),
-                    "Seed": gr.update(value=-1),
-                    "Sampler": gr.update(value="Euler"),
-                    "CFG scale": gr.update(value=7.),  # cfg
-                    "Clip skip": gr.update(value=True),
-                    "Model": gr.update(value=name_model),
-                    "Schedule type": gr.update(value="Automatic"),
-                    "PAG": gr.update(value=.0),
-                    "FreeU": gr.update(value=False),
-                    "Hires upscaler": gr.update(),
-                    "Hires upscale": gr.update(),
-                    "Hires steps": gr.update(),
-                    "Hires denoising strength": gr.update(),
-                    "Hires CFG": gr.update(),
-                    "Hires sampler": gr.update(),
-                    "Hires schedule type": gr.update(),
-                    "Image resolution": gr.update(value=1024),
-                    "Strength": gr.update(),
-                }
-
-                # Generate up to 7 LoRAs
-                for i in range(1, 8):
-                    valid_receptors[f"Lora_{i}"] = gr.update()
-                    valid_receptors[f"Lora_scale_{i}"] = gr.update()
-
-                valid_keys = list(valid_receptors.keys())
-
-                parameters = extract_parameters(base_prompt)
-                # print(parameters)
-
-                if "Sampler" in parameters:
-                    value_sampler = parameters["Sampler"]
-                    for s_type in SCHEDULE_TYPE_OPTIONS:
-                        if s_type in value_sampler:
-                            value_sampler = value_sampler.replace(s_type, "").strip()
-                            parameters["Sampler"] = value_sampler
-                            parameters["Schedule type"] = s_type
-
-                params_lora = []
-                if ">" in parameters["prompt"] and "<" in parameters["prompt"]:
-                    params_lora = re.findall(r'<lora:[^>]+>', parameters["prompt"])
-                if "Loras" in parameters:
-                    params_lora += re.findall(r'<lora:[^>]+>', parameters["Loras"])
-
-                if params_lora:
-                    parsed_params = []
-                    for tag_l in params_lora:
-                        try:
-                            inner = tag_l.strip("<>")  # remove < >
-                            _, data_l = inner.split(":", 1)  # remove the "lora:" part
-                            parts_l = data_l.split(":")
-
-                            name_l = parts_l[0]
-                            weight_l = float(parts_l[1]) if len(parts_l) > 1 else 1.0  # default weight = 1.0
-
-                            parsed_params.append((name_l, weight_l))
-                        except Exception as e:
-                            print(f"Error parsing LoRA tag {tag_l}: {e}")
-
-                    num_lora = 1
-                    for parsed_l, parsed_s in parsed_params:
-                        filtered_loras = [m for m in lora_model_list if parsed_l in m]
-                        if filtered_loras:
-                            parameters[f"Lora_{num_lora}"] = filtered_loras[0]
-                            parameters[f"Lora_scale_{num_lora}"] = parsed_s
-                            num_lora += 1
-
-                # continue = discard new value
-                for key, val in parameters.items():
-                    # print(val)
-                    if key in valid_keys:
-                        try:
-                            if key == "Sampler":
-                                if val not in scheduler_names:
-                                    continue
-                            if key in ["Schedule type", "Hires schedule type"]:
-                                if val not in SCHEDULE_TYPE_OPTIONS:
-                                    continue
-                            if key == "Hires sampler":
-                                if val not in POST_PROCESSING_SAMPLER:
-                                    continue
-                            elif key == "Clip skip":
-                                if "," in str(val):
-                                    val = val.replace(",", "")
-                                if int(val) >= 2:
-                                    val = True
-                            if key == "prompt":
-                                if ">" in val and "<" in val:
-                                    val = re.sub(r'<[^>]+>', '', val)  # Delete html and loras
-                                    print("Removed LoRA written in the prompt")
-                            if key in ["prompt", "neg_prompt"]:
-                                val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
-                            if key in ["Steps", "width", "height", "Seed", "Hires steps", "Image resolution"]:
-                                val = int(val)
-                            if key == "FreeU":
-                                val = True
-                            if key in ["CFG scale", "PAG", "Hires upscale", "Hires denoising strength", "Hires CFG", "Strength"]:
-                                val = float(val)
-                            if key == "Model":
-                                filtered_models = [m for m in model_list if val in m]
-                                if filtered_models:
-                                    val = filtered_models[0]
-                                else:
-                                    val = name_model
-                            if key == "Hires upscaler":
-                                if val not in UPSCALER_KEYS:
-                                    continue
-                            if key == "Seed":
-                                continue
-
-                            valid_receptors[key] = gr.update(value=val)
-                            # print(val, type(val))
-                            # print(valid_receptors)
-                        except Exception as e:
-                            print(str(e))
-                return [value for value in valid_receptors.values()]
-
-            def run_clear_prompt_gui():
-                return gr.update(value=""), gr.update(value="")
-            clear_prompt_gui.click(
-                run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
-            )
-
-            def run_set_random_seed():
-                return -1
-            set_random_seed.click(
-                run_set_random_seed, [], seed_gui
-            )
-
-            num_images_gui = gr.Slider(minimum=1, maximum=(8 if IS_ZERO_GPU else 20), step=1, value=1, label="Images")
-            prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[0][1])
-            vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
+        with gr.Accordion("Generation settings", open=True, visible=True):
+            steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=30, label="Steps")
+            cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7., label="CFG")
+            sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler")
+            schedule_type_gui = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
+            img_width_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=832, label="Img Width")
+            img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1216, label="Img Height")
+            seed_gui = gr.Number(minimum=-1, maximum=9999999999, value=-1, label="Seed")
+            pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
+            with gr.Row():
+                clip_skip_gui = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
+                free_u_gui = gr.Checkbox(value=True, label="FreeU")
+
+            with gr.Row(equal_height=False):
+                num_images_gui = gr.Slider(minimum=1, maximum=(16 if IS_ZERO_GPU else 20), step=1, value=1, label="Images")
+                prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[2][1])
+                vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
+
+
+        def run_set_params_gui(base_prompt, name_model):
+            valid_receptors = {  # default values
+                "prompt": gr.update(value=base_prompt),
+                "neg_prompt": gr.update(value=""),
+                "Steps": gr.update(value=30),
+                "width": gr.update(value=1024),
+                "height": gr.update(value=1024),
+                "Seed": gr.update(value=-1),
+                "Sampler": gr.update(value="Euler"),
+                "CFG scale": gr.update(value=7.),  # cfg
+                "Clip skip": gr.update(value=True),
+                "Model": gr.update(value=name_model),
+                "Schedule type": gr.update(value="Automatic"),
+                "PAG": gr.update(value=.0),
+                "FreeU": gr.update(value=False),
+                "Hires upscaler": gr.update(),
+                "Hires upscale": gr.update(),
+                "Hires steps": gr.update(),
+                "Hires denoising strength": gr.update(),
+                "Hires CFG": gr.update(),
+                "Hires sampler": gr.update(),
+                "Hires schedule type": gr.update(),
+                "Image resolution": gr.update(value=1024),
+                "Strength": gr.update(),
+                "Prompt emphasis": gr.update(),
+            }
+
+            # Generate up to 7 LoRAs
+            for i in range(1, 8):
+                valid_receptors[f"Lora_{i}"] = gr.update()
+                valid_receptors[f"Lora_scale_{i}"] = gr.update()
+
+            valid_keys = list(valid_receptors.keys())
+
+            parameters = extract_parameters(base_prompt)
+            # print(parameters)
+
+            if "Sampler" in parameters:
+                value_sampler = parameters["Sampler"]
+                for s_type in SCHEDULE_TYPE_OPTIONS:
+                    if s_type in value_sampler:
+                        value_sampler = value_sampler.replace(s_type, "").strip()
+                        parameters["Sampler"] = value_sampler
+                        parameters["Schedule type"] = s_type
+
+            params_lora = []
+            if ">" in parameters["prompt"] and "<" in parameters["prompt"]:
+                params_lora = re.findall(r'<lora:[^>]+>', parameters["prompt"])
+            if "Loras" in parameters:
+                params_lora += re.findall(r'<lora:[^>]+>', parameters["Loras"])
+
+            if params_lora:
+                parsed_params = []
+                for tag_l in params_lora:
+                    try:
+                        inner = tag_l.strip("<>")  # remove < >
+                        _, data_l = inner.split(":", 1)  # remove the "lora:" part
+                        parts_l = data_l.split(":")
+
+                        name_l = parts_l[0]
+                        weight_l = float(parts_l[1]) if len(parts_l) > 1 else 1.0  # default weight = 1.0
+
+                        parsed_params.append((name_l, weight_l))
+                    except Exception as e:
+                        print(f"Error parsing LoRA tag {tag_l}: {e}")
+
+                new_lora_model_list = get_model_list(DIRECTORY_LORAS)
+                new_lora_model_list.insert(0, "None")
+
+                num_lora = 1
+                for parsed_l, parsed_s in parsed_params:
+                    filtered_loras = [m for m in new_lora_model_list if parsed_l in m]
+                    if filtered_loras:
+                        parameters[f"Lora_{num_lora}"] = filtered_loras[0]
+                        parameters[f"Lora_scale_{num_lora}"] = parsed_s
+                        num_lora += 1
+
+            # continue = discard new value
+            for key, val in parameters.items():
+                # print(val)
+                if key in valid_keys:
+                    try:
+                        if key == "Sampler":
+                            if val not in scheduler_names:
+                                continue
+                        if key in ["Schedule type", "Hires schedule type"]:
+                            if val not in SCHEDULE_TYPE_OPTIONS:
+                                continue
+                        if key == "Hires sampler":
+                            if val not in POST_PROCESSING_SAMPLER:
+                                continue
+                        if key == "Prompt emphasis":
+                            if val not in PROMPT_WEIGHT_OPTIONS_PRIORITY:
+                                continue
+                        elif key == "Clip skip":
+                            if "," in str(val):
+                                val = val.replace(",", "")
+                            if int(val) >= 2:
+                                val = True
+                        if key == "prompt":
+                            if ">" in val and "<" in val:
+                                val = re.sub(r'<[^>]+>', '', val)  # Delete html and loras
+                                print("Removed LoRA written in the prompt")
+                        if key in ["prompt", "neg_prompt"]:
+                            val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
+                        if key in ["Steps", "width", "height", "Seed", "Hires steps", "Image resolution"]:
+                            val = int(val)
+                        if key == "FreeU":
+                            val = True
+                        if key in ["CFG scale", "PAG", "Hires upscale", "Hires denoising strength", "Hires CFG", "Strength"]:
+                            val = float(val)
+                        if key == "Model":
+                            filtered_models = [m for m in model_list if val in m]
+                            if filtered_models:
+                                val = filtered_models[0]
+                            else:
+                                val = name_model
+                        if key == "Hires upscaler":
+                            if val not in UPSCALER_KEYS:
+                                continue
+                        if key == "Seed":
+                            continue
+
+                        valid_receptors[key] = gr.update(value=val)
+                        # print(val, type(val))
+                        # print(valid_receptors)
+                    except Exception as e:
+                        print(str(e))
+            return [value for value in valid_receptors.values()]
+
+        def run_clear_prompt_gui():
+            return gr.update(value=""), gr.update(value="")
+        clear_prompt_gui.click(
+            run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
+        )
+
+        def run_set_random_seed():
+            return -1
+        set_random_seed.click(
+            run_set_random_seed, [], seed_gui
+        )

         with gr.Accordion("Hires fix", open=False, visible=True):

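The &lt;lora:name:weight&gt; handling added inside run_set_params_gui above can be read in isolation; the standalone sketch below mirrors the parsing logic in the hunk (the function name and the example prompt are illustrative):

```python
import re


def parse_lora_tags(prompt):
    """Return (name, weight) pairs from <lora:name:weight> tags; weight defaults to 1.0."""
    pairs = []
    for tag in re.findall(r'<lora:[^>]+>', prompt):
        try:
            _, data = tag.strip("<>").split(":", 1)  # drop the leading "lora:"
            parts = data.split(":")
            weight = float(parts[1]) if len(parts) > 1 else 1.0
            pairs.append((parts[0], weight))
        except Exception as e:
            print(f"Error parsing LoRA tag {tag}: {e}")
    return pairs


print(parse_lora_tags("1girl <lora:detail_slider:0.8>, masterpiece <lora:style_x>"))
# [('detail_slider', 0.8), ('style_x', 1.0)]
```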
@@ -1012,10 +1030,10 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
         lora_scale_4_gui = lora_scale_slider("Lora Scale 4")
         lora5_gui = lora_dropdown("Lora5")
         lora_scale_5_gui = lora_scale_slider("Lora Scale 5")
-        lora6_gui = lora_dropdown("Lora6", visible=(not IS_ZERO_GPU))
-        lora_scale_6_gui = lora_scale_slider("Lora Scale 6", visible=(not IS_ZERO_GPU))
-        lora7_gui = lora_dropdown("Lora7", visible=(not IS_ZERO_GPU))
-        lora_scale_7_gui = lora_scale_slider("Lora Scale 7", visible=(not IS_ZERO_GPU))
+        lora6_gui = lora_dropdown("Lora6")
+        lora_scale_6_gui = lora_scale_slider("Lora Scale 6")
+        lora7_gui = lora_dropdown("Lora7")
+        lora_scale_7_gui = lora_scale_slider("Lora Scale 7")

         with gr.Accordion("From URL", open=False, visible=True):
             text_lora = gr.Textbox(
@@ -1171,6 +1189,8 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
         schedule_prediction_type_gui = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
         guidance_rescale_gui = gr.Number(label="CFG rescale:", value=0., step=0.01, minimum=0., maximum=1.5)
         save_generated_images_gui = gr.Checkbox(value=True, label="Create a download link for the images")
+        enable_live_preview_gui = gr.Checkbox(value=True, label="Enable live previews")
+        display_images_gui = gr.Checkbox(value=True, label="Show final results")
         filename_pattern_gui = gr.Textbox(label="Filename pattern", value="model,seed", placeholder="model,seed,sampler,schedule_type,img_width,img_height,guidance_scale,num_steps,vae,prompt_section,neg_prompt_section", lines=1)
         hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
         hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
@@ -1183,12 +1203,11 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
         with gr.Accordion("More settings", open=False, visible=False):
             loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
             retain_task_cache_gui = gr.Checkbox(value=False, label="Retain task model in cache")
-            display_images_gui = gr.Checkbox(value=False, label="Display Images")
-            image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
+
+            image_previews_gui = gr.Checkbox(value=True, label="Image Previews (alt)")
             retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
             retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
             retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
-            xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")

         set_params_gui.click(
             run_set_params_gui, [prompt_gui, model_name_gui], [
@@ -1214,6 +1233,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                 hires_schedule_type_gui,
                 image_resolution_gui,
                 strength_gui,
+                prompt_syntax_gui,
                 lora1_gui,
                 lora_scale_1_gui,
                 lora2_gui,
@@ -1442,7 +1462,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
             t2i_adapter_preprocessor_gui,
             adapter_conditioning_scale_gui,
             adapter_conditioning_factor_gui,
-            xformers_memory_efficient_attention_gui,
+            enable_live_preview_gui,
             free_u_gui,
             generator_in_cpu_gui,
             adetailer_inpaint_only_gui,
@@ -1503,4 +1523,4 @@ if __name__ == "__main__":
         ssr_mode=args.ssr,
         allowed_paths=[allowed_path],
         show_api=(not HIDE_API),
-    )
+    )
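The last hunk only touches the closing parenthesis of the launch() call. For context, a hedged sketch of a launch using the arguments named there, assuming a Gradio version that supports ssr_mode; the demo object and the values are placeholders, not the app's:

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")

# Mirrors the arguments visible in the hunk; values here are placeholders.
demo.launch(
    ssr_mode=False,              # app.py passes args.ssr
    allowed_paths=["./images"],  # app.py passes [allowed_path]
    show_api=True,               # app.py passes (not HIDE_API)
)
```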