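"""SubjectGenius inference script.

Example invocation (script filename assumed; all flags are defined in
parse_args below, and the path defaults point at the repository's example
assets):

    python inference.py --condition_types fill subject --version training-free
"""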
import os
import sys

# Make the repository root importable so the `src` package resolves.
current_dir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(current_dir, '..')))
import torch
from src.condition import Condition
from PIL import Image
from src.SubjectGeniusTransformer2DModel import SubjectGeniusTransformer2DModel
from src.SubjectGeniusPipeline import SubjectGeniusPipeline
from accelerate.utils import set_seed
import json
import argparse
import cv2
import numpy as np
from datetime import datetime
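
# Run inference in bfloat16 on the first CUDA device.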
weight_dtype = torch.bfloat16
device = torch.device("cuda:0")


def parse_args(input_args=None):
    parser = argparse.ArgumentParser(description="SubjectGenius inference script.")
    parser.add_argument("--pretrained_model_name_or_path", type=str, default="/data/ydchen/VLP/SubjectGenius/model/FLUX.1-schnell")
    parser.add_argument("--transformer", type=str, default="/data/ydchen/VLP/SubjectGenius/model/FLUX.1-schnell/transformer")
    parser.add_argument("--condition_types", type=str, nargs='+', default=["fill", "subject"])
    parser.add_argument("--denoising_lora", type=str, default="/data/ydchen/VLP/SubjectGenius/model/Subject_genuis/Denoising_LoRA/subject_fill_union")
    parser.add_argument("--denoising_lora_weight", type=float, default=1.0)
    parser.add_argument("--condition_lora_dir", type=str, default="/data/ydchen/VLP/SubjectGenius/model/Subject_genuis/Condition_LoRA")
    parser.add_argument("--work_dir", type=str, default="/data/ydchen/VLP/SubjectGenius/output/inference_result")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--resolution", type=int, default=512)
    parser.add_argument("--canny", type=str, default=None)
    parser.add_argument("--depth", type=str, default=None)
    parser.add_argument("--fill", type=str, default="/data/ydchen/VLP/SubjectGenius/examples/window/background.jpg")
    parser.add_argument("--subject", type=str, default="/data/ydchen/VLP/SubjectGenius/examples/window/subject.jpg")
    parser.add_argument("--json", type=str, default="/data/ydchen/VLP/SubjectGenius/examples/window/1634_rank0_A decorative fabric topper for windows..json")
    parser.add_argument("--prompt", type=str, default=None)
    parser.add_argument("--num", type=int, default=1)
    parser.add_argument("--version", type=str, default="training-free", choices=["training-based", "training-free"])

    args = parser.parse_args()
    args.revision = None
    args.variant = None
    with open(args.json) as f:
        args.json = json.load(f)
    if args.prompt is None:
        # Fall back to the description stored in the example's JSON metadata.
        args.prompt = args.json['description']
    args.denoising_lora_name = os.path.basename(os.path.normpath(args.denoising_lora))
    return args


if __name__ == "__main__":
    args = parse_args()
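    # Load the base transformer and attach one Condition LoRA per condition type.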
    transformer = SubjectGeniusTransformer2DModel.from_pretrained(
        pretrained_model_name_or_path=args.transformer,
    ).to(device=device, dtype=weight_dtype)

    for condition_type in args.condition_types:
        transformer.load_lora_adapter(f"{args.condition_lora_dir}/{condition_type}.safetensors", adapter_name=condition_type)

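    # Build the pipeline without a transformer, then swap in the LoRA-equipped one.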
    pipe = SubjectGeniusPipeline.from_pretrained(
        args.pretrained_model_name_or_path,
        torch_dtype=weight_dtype,
        transformer=None,
    )
    pipe.transformer = transformer

    if args.version == "training-based":
        # Activate the Condition LoRAs at full strength alongside the Denoising
        # LoRA at the requested weight (one weight per active adapter).
        pipe.transformer.load_lora_adapter(args.denoising_lora, adapter_name=args.denoising_lora_name, use_safetensors=True)
        pipe.transformer.set_adapters(
            list(args.condition_types) + [args.denoising_lora_name],
            [1.0] * len(args.condition_types) + [args.denoising_lora_weight],
        )
    elif args.version == "training-free":
        pipe.transformer.set_adapters(list(args.condition_types))

    pipe = pipe.to(device)

    # Load conditions. `no_process=True` means the conditional images are used
    # as-is: no Canny/depth extraction or other preprocessing is applied.
    condition_inputs = {
        "subject": args.subject,
        "canny": args.canny,
        "depth": args.depth,
        "fill": args.fill,
    }
    conditions = []
    for condition_type in args.condition_types:
        if condition_type not in condition_inputs:
            raise ValueError(f"Unsupported condition type {condition_type!r}; supported: {sorted(condition_inputs)}.")
        if condition_inputs[condition_type] is None:
            raise ValueError(f"No input image given for condition type {condition_type!r} (pass --{condition_type}).")
        conditions.append(Condition(condition_type, raw_img=Image.open(condition_inputs[condition_type]), no_process=True))

    # load prompt
    prompt = args.prompt

    if args.seed is not None:
        set_seed(args.seed)

    # Timestamped output directory (avoid ':' in names for cross-platform paths).
    output_dir = os.path.join(args.work_dir, datetime.now().strftime('%y_%m_%d-%H-%M'))
    os.makedirs(output_dir, exist_ok=True)

    # Generate `num` images and save a visualization panel for each.
    for i in range(args.num):
        result_img = pipe(
            prompt=prompt,
            conditions=conditions,
            height=args.resolution,
            width=args.resolution,
            num_inference_steps=8,
            max_sequence_length=512,
            model_config={},
        ).images[0]

        # Side-by-side panel: each condition image followed by the generated result.
        res = args.resolution
        concat_image = Image.new("RGB", ((len(args.condition_types) + 1) * res, res))
        for j, cond_type in enumerate(args.condition_types):
            cond_image = conditions[j].condition
            if cond_type == "fill":
                # Grey-fill the target bbox, then outline it in gold for visualization.
                bbox = args.json['bbox']
                cond_image = cv2.rectangle(np.array(cond_image), tuple(bbox[:2]), tuple(bbox[2:]), color=(128, 128, 128), thickness=-1)
                cond_image = Image.fromarray(cv2.rectangle(cond_image, tuple(bbox[:2]), tuple(bbox[2:]), color=(255, 215, 0), thickness=2))
            concat_image.paste(cond_image, (j * res, 0))
        concat_image.paste(result_img, (len(args.condition_types) * res, 0))
        out_path = os.path.join(output_dir, f"{i}_result.jpg")
        concat_image.save(out_path)
        print(f"Done. Output saved at {out_path}")