cyd0806 committed
Commit d3d01be · verified · 1 Parent(s): af37370

Upload src/dataloader.py with huggingface_hub

Files changed (1)
  1. src/dataloader.py +114 -0
src/dataloader.py ADDED
@@ -0,0 +1,114 @@
+ import io
+
+ import torch
+ from PIL import Image
+ from accelerate.logging import get_logger
+ from datasets import load_dataset, concatenate_datasets
+ from diffusers.image_processor import VaeImageProcessor
+
+ from .condition import Condition
+
+ logger = get_logger(__name__)
+
+
+ def get_dataset(args):
+     assert isinstance(args.dataset_name, list), "args.dataset_name should be a list of dataset names"
+     dataset = []
+     for name in args.dataset_name:
+         # Download and load each dataset from the Hub (or the local cache).
+         dataset.append(load_dataset(name, cache_dir=args.cache_dir, split="train"))
+     return concatenate_datasets(dataset)
+
+
+ def prepare_dataset(dataset, vae_scale_factor, accelerator, args):
+     image_processor = VaeImageProcessor(vae_scale_factor=vae_scale_factor * 2, do_resize=True, do_convert_rgb=True)
+
+     def preprocess_conditions(conditions):
+         conditioning_tensors = []
+         condition_types = []
+         for cond in conditions:
+             conditioning_tensors.append(image_processor.preprocess(cond.condition, width=args.resolution, height=args.resolution).squeeze(0))
+             condition_types.append(cond.condition_type)
+         return torch.stack(conditioning_tensors, dim=0), condition_types
+
+     def preprocess(examples):
+         pixel_values = []
+         condition_latents = []
+         condition_types = []
+         bboxes = []
+         for image, bbox, canny, depth in zip(examples[args.image_column], examples[args.bbox_column], examples[args.canny_column], examples[args.depth_column]):
+             image = Image.open(image).convert("RGB") if isinstance(image, str) else image.convert("RGB")
+             width, height = image.size
+             # The width must be even so the image can be split into two equal halves.
+             if width % 2 != 0:
+                 raise ValueError("Image width must be even to split into two equal parts.")
+             # Split the image into its left (target) and right (subject) halves.
+             left_image = image.crop((0, 0, width // 2, height))
+             right_image = image.crop((width // 2, 0, width, height))
+             # Convert the normalized (cx, cy, w, h) bbox to pixel coordinates.
+             bbox_pixel = [
+                 bbox[0] * width,
+                 bbox[1] * height,
+                 bbox[2] * width,
+                 bbox[3] * height,
+             ]
+             left = bbox_pixel[0] - bbox_pixel[2] / 2
+             top = bbox_pixel[1] - bbox_pixel[3] / 2
+             right = bbox_pixel[0] + bbox_pixel[2] / 2
+             bottom = bbox_pixel[1] + bbox_pixel[3] / 2
+             # Black out the bbox region on a copy of the left half (used as the "fill" condition).
+             masked_left_image = left_image.copy()
+             masked_left_image.paste((0, 0, 0), (int(left), int(top), int(right), int(bottom)))
+             # Rescale the bbox to the target resolution of the left half.
+             bboxes.append([
+                 int(left * args.resolution / (width // 2)),
+                 int(top * args.resolution / height),
+                 int(right * args.resolution / (width // 2)),
+                 int(bottom * args.resolution / height),
+             ])
+             # Preprocess the left half as the target image.
+             pixel_values.append(image_processor.preprocess(left_image, width=args.resolution, height=args.resolution).squeeze(0))
+             conditions = []
+             for condition_type in args.condition_types:
+                 if condition_type == "subject":
+                     conditions.append(Condition("subject", condition=right_image))
+                 elif condition_type == "canny":
+                     conditions.append(Condition("canny", condition=Image.open(io.BytesIO(canny["bytes"]))))
+                 elif condition_type == "depth":
+                     conditions.append(Condition("depth", condition=Image.open(io.BytesIO(depth["bytes"]))))
+                 elif condition_type == "fill":
+                     conditions.append(Condition("fill", condition=masked_left_image))
+                 else:
+                     raise ValueError("Only subject, canny, depth and fill conditions are supported.")
+             cond_tensors, cond_types = preprocess_conditions(conditions)
+             condition_latents.append(cond_tensors)
+             condition_types.append(cond_types)
+         examples["pixel_values"] = pixel_values
+         examples["condition_latents"] = condition_latents
+         examples["condition_types"] = condition_types
+         examples["bbox"] = bboxes
+         return examples
+
+     with accelerator.main_process_first():
+         dataset = dataset.with_transform(preprocess)
+
+     return dataset
+
+
+ def collate_fn(examples):
+     pixel_values = torch.stack([example["pixel_values"] for example in examples])
+     pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
+     condition_latents = torch.stack([example["condition_latents"] for example in examples])
+     condition_latents = condition_latents.to(memory_format=torch.contiguous_format).float()
+     bboxes = [example["bbox"] for example in examples]
+     condition_types = [example["condition_types"] for example in examples]
+     descriptions = [example["description"]["description_0"] for example in examples]
+     items = [example["description"]["item"] for example in examples]
+     return {
+         "pixel_values": pixel_values,
+         "condition_latents": condition_latents,
+         "condition_types": condition_types,
+         "descriptions": descriptions,
+         "bboxes": bboxes,
+         "items": items,
+     }
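For context, here is a minimal sketch of how the three functions in this file could be wired together in a training script. This is an illustration under assumptions, not part of the commit: the args fields, the dataset id, and vae_scale_factor = 8 are hypothetical, and the dataset is assumed to carry a "description" column (with "description_0" and "item" fields) since collate_fn reads it.

# Hypothetical wiring of get_dataset / prepare_dataset / collate_fn.
# All args values below are placeholders for illustration only.
from types import SimpleNamespace

import torch
from accelerate import Accelerator

from src.dataloader import get_dataset, prepare_dataset, collate_fn

args = SimpleNamespace(
    dataset_name=["user/dataset"],   # hypothetical Hub dataset id(s)
    cache_dir=None,
    resolution=512,
    image_column="image",
    bbox_column="bbox",
    canny_column="canny",
    depth_column="depth",
    condition_types=["subject", "fill"],
    train_batch_size=4,
)

accelerator = Accelerator()
dataset = get_dataset(args)
# Typical diffusers convention: vae_scale_factor = 2 ** (len(vae.config.block_out_channels) - 1),
# which is 8 for SD/FLUX-style VAEs; hard-coded here since no VAE is loaded.
vae_scale_factor = 8
dataset = prepare_dataset(dataset, vae_scale_factor, accelerator, args)

dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=args.train_batch_size,
    shuffle=True,
    collate_fn=collate_fn,
)

for batch in dataloader:
    # batch["pixel_values"]: (B, 3, resolution, resolution) target images.
    # batch["condition_latents"]: (B, num_conditions, 3, resolution, resolution).
    # batch also carries "condition_types", "descriptions", "bboxes", and "items".
    break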