yujiepan committed · Commit b24dc2a · verified · 1 parent: d9e0511

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer/tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,144 @@
+ ---
+ library_name: Diffusers
+ pipeline_tag: text-to-image
+ inference: true
+ base_model:
+ - Tongyi-MAI/Z-Image-Turbo
+ ---
+
+ This tiny model is for debugging. It is randomly initialized, with its config adapted from [Tongyi-MAI/Z-Image-Turbo](https://huggingface.co/Tongyi-MAI/Z-Image-Turbo).
+
+ File sizes:
+ - 2.4 MB text_encoder/model.safetensors
+ - 1.4 MB transformer/diffusion_pytorch_model.safetensors
+ - 0.5 MB vae/diffusion_pytorch_model.safetensors
+
+ ### Example usage
+
+ ```python
+ import torch
+ from diffusers import ZImagePipeline
+
+ model_id = "yujiepan/z-image-tiny-random"
+ torch_dtype = torch.bfloat16
+ device = "cuda"
+ pipe = ZImagePipeline.from_pretrained(model_id, torch_dtype=torch_dtype)
+ pipe = pipe.to(device)
+
+ prompt = "Flowers and trees"
+ image = pipe(
+     prompt=prompt,
+     height=1024,
+     width=1024,
+     num_inference_steps=9,  # This actually results in 8 DiT forward passes
+     guidance_scale=0.0,  # Guidance should be 0 for the Turbo models
+     generator=torch.Generator("cuda").manual_seed(42),
+ ).images[0]
+ print(image)
+ ```
+
+ ### Code to create this repo
+
+ ```python
+ import json
+ from pathlib import Path
+
+ import torch
+ from diffusers import (
+     AutoencoderKL,
+     FlowMatchEulerDiscreteScheduler,
+     ZImagePipeline,
+     ZImageTransformer2DModel,
+ )
+ from huggingface_hub import hf_hub_download
+ from transformers import AutoConfig, AutoTokenizer, Qwen3Model
+ from transformers.generation import GenerationConfig
+
+ source_model_id = "Tongyi-MAI/Z-Image-Turbo"
+ save_folder = "/tmp/yujiepan/z-image-tiny-random"
+
+ torch.set_default_dtype(torch.bfloat16)
+ scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
+     source_model_id, subfolder='scheduler')
+ tokenizer = AutoTokenizer.from_pretrained(
+     source_model_id, subfolder='tokenizer')
+
+
+ def save_json(path, obj):
+     Path(path).parent.mkdir(parents=True, exist_ok=True)
+     with open(path, 'w', encoding='utf-8') as f:
+         json.dump(obj, f, indent=2, ensure_ascii=False)
+
+
+ def init_weights(model):
+     torch.manual_seed(42)
+     with torch.no_grad():
+         for name, p in sorted(model.named_parameters()):
+             torch.nn.init.normal_(p, 0, 0.1)
+             print(name, p.shape, p.dtype, p.device)
+
+
+ # Text encoder: shrink the Qwen3 config, then build and re-initialize the model.
+ with open(hf_hub_download(source_model_id, filename='text_encoder/config.json', repo_type='model'), 'r', encoding='utf-8') as f:
+     config = json.load(f)
+ config.update({
+     'head_dim': 32,
+     'hidden_size': 8,
+     'intermediate_size': 32,
+     'max_window_layers': 1,
+     'num_attention_heads': 8,
+     'num_hidden_layers': 2,
+     'num_key_value_heads': 4,
+     'tie_word_embeddings': True,
+ })
+ save_json(f'{save_folder}/text_encoder/config.json', config)
+ text_encoder_config = AutoConfig.from_pretrained(
+     f'{save_folder}/text_encoder')
+ text_encoder = Qwen3Model(text_encoder_config).to(torch.bfloat16)
+ generation_config = GenerationConfig.from_pretrained(
+     source_model_id, subfolder='text_encoder')
+ text_encoder.generation_config = generation_config
+ init_weights(text_encoder)
+
+ # Transformer: shrink the DiT config, then build and re-initialize the model.
+ with open(hf_hub_download(source_model_id, filename='transformer/config.json', repo_type='model'), 'r', encoding='utf-8') as f:
+     config = json.load(f)
+ config.update({
+     'dim': 64,
+     'axes_dims': [8, 8, 16],
+     'n_heads': 2,
+     'n_kv_heads': 4,
+     'n_layers': 2,
+     'cap_feat_dim': 8,
+     'in_channels': 8,
+ })
+ save_json(f'{save_folder}/transformer/config.json', config)
+ transformer_config = ZImageTransformer2DModel.load_config(
+     f'{save_folder}/transformer')
+ transformer = ZImageTransformer2DModel.from_config(transformer_config)
+ init_weights(transformer)
+
+ # VAE: shrink the autoencoder config, then build and re-initialize the model.
+ with open(hf_hub_download(source_model_id, filename='vae/config.json', repo_type='model'), 'r', encoding='utf-8') as f:
+     config = json.load(f)
+ config.update({
+     'layers_per_block': 1,
+     'block_out_channels': [32, 32],
+     'latent_channels': 8,
+     'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
+     'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
+ })
+ save_json(f'{save_folder}/vae/config.json', config)
+ vae_config = AutoencoderKL.load_config(f'{save_folder}/vae')
+ vae = AutoencoderKL.from_config(vae_config)
+ init_weights(vae)
+
+ # Assemble the pipeline from the tiny components and save everything.
+ pipeline = ZImagePipeline(
+     scheduler=scheduler,
+     text_encoder=text_encoder,
+     tokenizer=tokenizer,
+     transformer=transformer,
+     vae=vae,
+ )
+ pipeline = pipeline.to(torch.bfloat16)
+ pipeline.save_pretrained(save_folder, safe_serialization=True)
+ print(pipeline)
+ ```
model_index.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "_class_name": "ZImagePipeline",
+   "_diffusers_version": "0.36.0.dev0",
+   "scheduler": [
+     "diffusers",
+     "FlowMatchEulerDiscreteScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "Qwen3Model"
+   ],
+   "tokenizer": [
+     "transformers",
+     "Qwen2Tokenizer"
+   ],
+   "transformer": [
+     "diffusers",
+     "ZImageTransformer2DModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
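
Each component entry in `model_index.json` is a `[library, class]` pair that `DiffusionPipeline.from_pretrained` uses to pick the loading class for the matching subfolder. A minimal sketch of inspecting the index by hand (assuming the `yujiepan/z-image-tiny-random` repo id from the README above):

```python
import json

from huggingface_hub import hf_hub_download

# Fetch and parse the pipeline index; component entries are [library, class_name] pairs.
path = hf_hub_download("yujiepan/z-image-tiny-random", filename="model_index.json")
with open(path, "r", encoding="utf-8") as f:
    index = json.load(f)

for name, value in index.items():
    if isinstance(value, list):  # skip _class_name / _diffusers_version metadata
        library, class_name = value
        print(f"{name}: {class_name} (from {library})")
```
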
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "_class_name": "FlowMatchEulerDiscreteScheduler",
+   "_diffusers_version": "0.36.0.dev0",
+   "base_image_seq_len": 256,
+   "base_shift": 0.5,
+   "invert_sigmas": false,
+   "max_image_seq_len": 4096,
+   "max_shift": 1.15,
+   "num_train_timesteps": 1000,
+   "shift": 3.0,
+   "shift_terminal": null,
+   "stochastic_sampling": false,
+   "time_shift_type": "exponential",
+   "use_beta_sigmas": false,
+   "use_dynamic_shifting": false,
+   "use_exponential_sigmas": false,
+   "use_karras_sigmas": false
+ }
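
The scheduler can also be loaded on its own via the `subfolder` argument; a minimal sketch, assuming the repo id from the README above:

```python
from diffusers import FlowMatchEulerDiscreteScheduler

# Load only the scheduler component from the scheduler/ subfolder.
scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
    "yujiepan/z-image-tiny-random", subfolder="scheduler"
)
# shift=3.0 and num_train_timesteps=1000 come from the config above.
print(scheduler.config.shift, scheduler.config.num_train_timesteps)
```
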
text_encoder/config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "architectures": [
+     "Qwen3Model"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "dtype": "bfloat16",
+   "eos_token_id": 151645,
+   "head_dim": 32,
+   "hidden_act": "silu",
+   "hidden_size": 8,
+   "initializer_range": 0.02,
+   "intermediate_size": 32,
+   "layer_types": [
+     "full_attention",
+     "full_attention"
+   ],
+   "max_position_embeddings": 40960,
+   "max_window_layers": 1,
+   "model_type": "qwen3",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 2,
+   "num_key_value_heads": 4,
+   "rms_norm_eps": 1e-06,
+   "rope_parameters": {
+     "rope_theta": 1000000,
+     "rope_type": "default"
+   },
+   "sliding_window": null,
+   "tie_word_embeddings": true,
+   "transformers_version": "5.0.0.dev0",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
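
Because the text encoder is a plain `Qwen3Model`, it can be loaded independently of the pipeline; a minimal sketch, assuming the repo id from the README above:

```python
from transformers import Qwen3Model

# Load only the tiny text encoder (hidden_size=8, 2 layers) from text_encoder/.
text_encoder = Qwen3Model.from_pretrained(
    "yujiepan/z-image-tiny-random", subfolder="text_encoder"
)
# Roughly 1.2M parameters, almost all in the 151936 x 8 token embedding.
print(sum(p.numel() for p in text_encoder.parameters()))
```
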
text_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25eeb02baf45bca7a4d6d353ee850b77a57f0f589a8718c5b00b3a48b8d3ea9f
+ size 2461344
tokenizer/chat_template.jinja ADDED
@@ -0,0 +1,89 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {%- if loop.last or (not loop.last and reasoning_content) %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if enable_thinking is defined and enable_thinking is false %}
+ {{- '<think>\n\n</think>\n\n' }}
+ {%- endif %}
+ {%- endif %}
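
This is the standard Qwen-style chat template; whether the Z-Image prompt encoder routes prompts through it is not shown here. A minimal sketch of rendering it via the tokenizer, assuming the repo id from the README above and a transformers version that picks up `chat_template.jinja`:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("yujiepan/z-image-tiny-random", subfolder="tokenizer")
# Render the template above for a single user message, without tokenizing.
text = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Flowers and trees"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(text)
```
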
tokenizer/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05d47c87966b4db779200053de490f89936ed529f8ab889244e271630715fcfe
+ size 11422638
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "add_prefix_space": false,
+   "additional_special_tokens": null,
+   "backend": "tokenizers",
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "is_local": false,
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
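
The tokenizer itself is the unmodified Qwen2 tokenizer copied from the source model (see the creation script in the README). A minimal round-trip sketch, assuming the repo id from the README above:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("yujiepan/z-image-tiny-random", subfolder="tokenizer")
ids = tokenizer("Flowers and trees").input_ids
# eos/pad tokens and the max length come from the config above.
print(ids, tokenizer.decode(ids), tokenizer.eos_token, tokenizer.pad_token, tokenizer.model_max_length)
```
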
transformer/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_class_name": "ZImageTransformer2DModel",
+   "_diffusers_version": "0.36.0.dev0",
+   "all_f_patch_size": [
+     1
+   ],
+   "all_patch_size": [
+     2
+   ],
+   "axes_dims": [
+     8,
+     8,
+     16
+   ],
+   "axes_lens": [
+     1536,
+     512,
+     512
+   ],
+   "cap_feat_dim": 8,
+   "dim": 64,
+   "in_channels": 8,
+   "n_heads": 2,
+   "n_kv_heads": 4,
+   "n_layers": 2,
+   "n_refiner_layers": 2,
+   "norm_eps": 1e-05,
+   "qk_norm": true,
+   "rope_theta": 256.0,
+   "t_scale": 1000.0
+ }
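
The DiT can likewise be loaded on its own; a minimal sketch, assuming the repo id from the README above:

```python
from diffusers import ZImageTransformer2DModel

# Load only the tiny transformer (dim=64, 2 layers) from transformer/.
transformer = ZImageTransformer2DModel.from_pretrained(
    "yujiepan/z-image-tiny-random", subfolder="transformer"
)
# Roughly 0.7M parameters at bfloat16, matching the ~1.4 MB safetensors file.
print(sum(p.numel() for p in transformer.parameters()))
```
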
transformer/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4edbc5f3277b151aca0d61adfc6f4a8abc583a3835eaa2469a3ee2cc8ba75500
+ size 1411000
vae/config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.36.0.dev0",
+   "_name_or_path": "flux-dev",
+   "act_fn": "silu",
+   "block_out_channels": [
+     32,
+     32
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "force_upcast": true,
+   "in_channels": 3,
+   "latent_channels": 8,
+   "latents_mean": null,
+   "latents_std": null,
+   "layers_per_block": 1,
+   "mid_block_add_attention": true,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 1024,
+   "scaling_factor": 0.3611,
+   "shift_factor": 0.1159,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ],
+   "use_post_quant_conv": false,
+   "use_quant_conv": false
+ }
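
With two encoder blocks (a single downsampling step) and `latent_channels: 8`, this VAE maps a 64x64 RGB image to an 8x32x32 latent. A minimal sketch, assuming the repo id from the README above:

```python
import torch

from diffusers import AutoencoderKL

# Load only the tiny VAE from vae/ and run a dummy image through the encoder.
vae = AutoencoderKL.from_pretrained("yujiepan/z-image-tiny-random", subfolder="vae")
with torch.no_grad():
    image = torch.randn(1, 3, 64, 64, dtype=vae.dtype)
    latents = vae.encode(image).latent_dist.sample()
print(latents.shape)  # expected: torch.Size([1, 8, 32, 32])
```
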
vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e71547b596ef6dfd8f8d09d61aadc533353269654c9b07b60e33a8c0472161c
+ size 456470