awni committed on
Commit 65d2673 · verified · 1 Parent(s): 80b637f

Add files using upload-large-folder tool

Files changed (49)
  1. .gitattributes +1 -0
  2. README.md +36 -0
  3. added_tokens.json +28 -0
  4. chat_template.jinja +143 -0
  5. config.json +162 -0
  6. configuration_mimo_v2_flash.py +109 -0
  7. merges.txt +0 -0
  8. model-00001-of-00036.safetensors +3 -0
  9. model-00002-of-00036.safetensors +3 -0
  10. model-00003-of-00036.safetensors +3 -0
  11. model-00004-of-00036.safetensors +3 -0
  12. model-00005-of-00036.safetensors +3 -0
  13. model-00006-of-00036.safetensors +3 -0
  14. model-00007-of-00036.safetensors +3 -0
  15. model-00008-of-00036.safetensors +3 -0
  16. model-00009-of-00036.safetensors +3 -0
  17. model-00010-of-00036.safetensors +3 -0
  18. model-00011-of-00036.safetensors +3 -0
  19. model-00012-of-00036.safetensors +3 -0
  20. model-00013-of-00036.safetensors +3 -0
  21. model-00014-of-00036.safetensors +3 -0
  22. model-00015-of-00036.safetensors +3 -0
  23. model-00016-of-00036.safetensors +3 -0
  24. model-00017-of-00036.safetensors +3 -0
  25. model-00018-of-00036.safetensors +3 -0
  26. model-00019-of-00036.safetensors +3 -0
  27. model-00020-of-00036.safetensors +3 -0
  28. model-00021-of-00036.safetensors +3 -0
  29. model-00022-of-00036.safetensors +3 -0
  30. model-00023-of-00036.safetensors +3 -0
  31. model-00024-of-00036.safetensors +3 -0
  32. model-00025-of-00036.safetensors +3 -0
  33. model-00026-of-00036.safetensors +3 -0
  34. model-00027-of-00036.safetensors +3 -0
  35. model-00028-of-00036.safetensors +3 -0
  36. model-00029-of-00036.safetensors +3 -0
  37. model-00030-of-00036.safetensors +3 -0
  38. model-00031-of-00036.safetensors +3 -0
  39. model-00032-of-00036.safetensors +3 -0
  40. model-00033-of-00036.safetensors +3 -0
  41. model-00034-of-00036.safetensors +3 -0
  42. model-00035-of-00036.safetensors +3 -0
  43. model-00036-of-00036.safetensors +3 -0
  44. model.safetensors.index.json +0 -0
  45. modeling_mimo_v2_flash.py +664 -0
  46. special_tokens_map.json +31 -0
  47. tokenizer.json +3 -0
  48. tokenizer_config.json +239 -0
  49. vocab.json +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,36 @@
1
+ ---
2
+ license: mit
3
+ base_model: XiaomiMiMo/MiMo-V2-Flash
4
+ library_name: mlx
5
+ tags:
6
+ - mlx
7
+ pipeline_tag: text-generation
8
+ ---
9
+
10
+ # mlx-community/MiMo-V2-Flash-4bit
11
+
12
+ This model [mlx-community/MiMo-V2-Flash-4bit](https://huggingface.co/mlx-community/MiMo-V2-Flash-4bit) was
13
+ converted to MLX format from [XiaomiMiMo/MiMo-V2-Flash](https://huggingface.co/XiaomiMiMo/MiMo-V2-Flash)
14
+ using mlx-lm version **0.29.1**.
15
+
16
+ ## Use with mlx
17
+
18
+ ```bash
19
+ pip install mlx-lm
20
+ ```
21
+
22
+ ```python
23
+ from mlx_lm import load, generate
24
+
25
+ model, tokenizer = load("mlx-community/MiMo-V2-Flash-4bit")
26
+
27
+ prompt = "hello"
28
+
29
+ if tokenizer.chat_template is not None:
30
+ messages = [{"role": "user", "content": prompt}]
31
+ prompt = tokenizer.apply_chat_template(
32
+ messages, add_generation_prompt=True
33
+ )
34
+
35
+ response = generate(model, tokenizer, prompt=prompt, verbose=True)
36
+ ```
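Note: the README example above runs the default (non-thinking) chat path. The `chat_template.jinja` shipped in this commit also defines an `enable_thinking` flag; the sketch below shows one way to pass it through `apply_chat_template`, assuming recent mlx-lm/transformers behavior of forwarding extra keyword arguments to the template (a sketch, not documented API of this repo).

```python
from mlx_lm import load, generate

model, tokenizer = load("mlx-community/MiMo-V2-Flash-4bit")

messages = [{"role": "user", "content": "Why is the sky blue?"}]

# Extra keyword arguments are forwarded to the Jinja chat template, so
# enable_thinking=True leaves the <think> block open for the model to fill.
prompt = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    enable_thinking=True,  # template variable defined in chat_template.jinja
)

response = generate(model, tokenizer, prompt=prompt, max_tokens=512, verbose=True)
```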
added_tokens.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "</think>": 151668,
3
+ "</tool_call>": 151658,
4
+ "</tool_response>": 151666,
5
+ "<think>": 151667,
6
+ "<tool_call>": 151657,
7
+ "<tool_response>": 151665,
8
+ "<|box_end|>": 151649,
9
+ "<|box_start|>": 151648,
10
+ "<|endoftext|>": 151643,
11
+ "<|file_sep|>": 151664,
12
+ "<|fim_middle|>": 151660,
13
+ "<|fim_pad|>": 151662,
14
+ "<|fim_prefix|>": 151659,
15
+ "<|fim_suffix|>": 151661,
16
+ "<|im_end|>": 151645,
17
+ "<|im_start|>": 151644,
18
+ "<|image_pad|>": 151655,
19
+ "<|object_ref_end|>": 151647,
20
+ "<|object_ref_start|>": 151646,
21
+ "<|quad_end|>": 151651,
22
+ "<|quad_start|>": 151650,
23
+ "<|repo_name|>": 151663,
24
+ "<|video_pad|>": 151656,
25
+ "<|vision_end|>": 151653,
26
+ "<|vision_pad|>": 151654,
27
+ "<|vision_start|>": 151652
28
+ }
chat_template.jinja ADDED
@@ -0,0 +1,143 @@
1
+ {%- if not add_generation_prompt is defined -%}
2
+ {%- set add_generation_prompt = false -%}
3
+ {%- endif -%}
4
+ {%- if not enable_thinking is defined -%}
5
+ {%- set enable_thinking = false -%}
6
+ {%- endif -%}
7
+ {%- if not keep_all_reasoning is defined -%}
8
+ {%- set keep_all_reasoning = false -%}
9
+ {%- endif -%}
10
+ {%- macro render_extra_keys(json_dict, handled_keys) -%}
11
+ {%- if json_dict is mapping %}
12
+ {%- for json_key in json_dict if json_key not in handled_keys %}
13
+ {%- if json_dict[json_key] is mapping or (json_dict[json_key] is sequence and json_dict[json_key] is not string) %}
14
+ {{- '\n<' ~ json_key ~ '>' ~ (json_dict[json_key] | tojson | safe) ~ '</' ~ json_key ~ '>' }}
15
+ {%- else %}
16
+ {{-'\n<' ~ json_key ~ '>' ~ (json_dict[json_key] | string) ~ '</' ~ json_key ~ '>' }}
17
+ {%- endif %}
18
+ {%- endfor %}
19
+ {%- endif %}
20
+ {%- endmacro -%}
21
+ {%- if messages[0]["role"] == "system" %}
22
+ {%- set system_message = messages[0]["content"] %}
23
+ {%- set loop_messages = messages[1:] %}
24
+ {%- else %}
25
+ {%- set loop_messages = messages %}
26
+ {%- endif %}
27
+ {%- set ns = namespace(last_user_index=-1) %}
28
+ {%- for m in loop_messages %}
29
+ {%- if m.role == 'user' %}
30
+ {%- set ns.last_user_index = loop.index0 -%}
31
+ {%- endif %}
32
+ {%- endfor %}
33
+ {%- if not tools is defined %}
34
+ {%- set tools = [] %}
35
+ {%- endif %}
36
+ {%- if system_message is defined %}
37
+ {{- "<|im_start|>system\n" + system_message }}
38
+ {%- else %}
39
+ {{- "<|im_start|>system\nYou are MiMo, a helpful AI assistant engineered by Xiaomi." }}
40
+ {%- endif %}
41
+ {%- if tools is iterable and tools | length > 0 %}
42
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou have access to the following functions:\n\n" }}
43
+ {{- "<tools>" }}
44
+ {%- for tool in tools %}
45
+ {%- if tool.function is defined %}
46
+ {%- set tool = tool.function %}
47
+ {%- endif %}
48
+ {{- "\n<function>\n<name>" ~ tool.name ~ "</name>" }}
49
+ {%- if tool.description is defined %}
50
+ {{- '\n<description>' ~ (tool.description | trim) ~ '</description>' }}
51
+ {%- endif %}
52
+ {{- '\n<parameters>' }}
53
+ {%- if tool.parameters is defined and tool.parameters is mapping and tool.parameters.properties is defined and tool.parameters.properties is mapping %}
54
+ {%- for param_name, param_fields in tool.parameters.properties|items %}
55
+ {{- '\n<parameter>' }}
56
+ {{- '\n<name>' ~ param_name ~ '</name>' }}
57
+ {%- if param_fields.type is defined %}
58
+ {{- '\n<type>' ~ (param_fields.type | string) ~ '</type>' }}
59
+ {%- endif %}
60
+ {%- if param_fields.description is defined %}
61
+ {{- '\n<description>' ~ (param_fields.description | trim) ~ '</description>' }}
62
+ {%- endif %}
63
+ {%- set handled_keys = ['name', 'type', 'description'] %}
64
+ {{- render_extra_keys(param_fields, handled_keys) }}
65
+ {{- '\n</parameter>' }}
66
+ {%- endfor %}
67
+ {%- endif %}
68
+ {%- set handled_keys = ['type', 'properties'] %}
69
+ {{- render_extra_keys(tool.parameters, handled_keys) }}
70
+ {{- '\n</parameters>' }}
71
+ {%- set handled_keys = ['type', 'name', 'description', 'parameters'] %}
72
+ {{- render_extra_keys(tool, handled_keys) }}
73
+ {{- '\n</function>' }}
74
+ {%- endfor %}
75
+ {{- "\n</tools>" }}
76
+ {{- '\n\nFor each function call, output the function name and arguments in the following format:\n<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>value_1</parameter>\n<parameter=example_parameter_2>This is the value for the second parameter\nthat can span\nmultiple lines</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\n- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n- DO NOT use function calls inside <think></think> tags.\n- The value enclosed between parameter tags is preserved exactly as-is, including newlines and spaces.\n</IMPORTANT>' }}
77
+ {%- endif %}
78
+ {{- '<|im_end|>' }}
79
+ {%- for message in loop_messages %}
80
+ {%- if message.content is string %}
81
+ {%- set content = message.content %}
82
+ {%- else %}
83
+ {%- set content = '' %}
84
+ {%- endif %}
85
+ {%- if message.role == "assistant" %}
86
+ {%- if message.reasoning_content is string %}
87
+ {%- set reasoning_content = message.reasoning_content %}
88
+ {%- else %}
89
+ {%- set reasoning_content = '' %}
90
+ {%- if '</think>' in content %}
91
+ {%- set reasoning_content = content.split('</think>')[0].split('<think>')[-1] %}
92
+ {%- set content = content.split('</think>')[-1] %}
93
+ {%- endif %}
94
+ {%- endif %}
95
+ {%- if (keep_all_reasoning or loop.index0 > ns.last_user_index) and reasoning_content -%}
96
+ {{- '<|im_start|>' + message.role + '\n<think>' + reasoning_content + '</think>' + content }}
97
+ {%- else %}
98
+ {{- '<|im_start|>' + message.role + '\n<think></think>' + content }}
99
+ {%- endif %}
100
+ {%- if message.tool_calls is defined and message.tool_calls is iterable and message.tool_calls | length > 0 %}
101
+ {%- for tool_call in message.tool_calls %}
102
+ {%- if tool_call.function is defined %}
103
+ {%- set tool_call = tool_call.function %}
104
+ {%- endif %}
105
+ {{- '<tool_call>\n<function=' + tool_call.name + '>\n' }}
106
+ {%- if tool_call.arguments is defined %}
107
+ {%- for args_name, args_value in tool_call.arguments|items %}
108
+ {{- '<parameter=' + args_name + '>' }}
109
+ {%- set args_value = args_value | tojson | safe if args_value is mapping or (args_value is sequence and args_value is not string) else args_value | string %}
110
+ {{- args_value }}
111
+ {{- '</parameter>\n' }}
112
+ {%- endfor %}
113
+ {%- endif %}
114
+ {{- '</function>\n</tool_call>' }}
115
+ {%- endfor %}
116
+ {%- endif %}
117
+ {{- '<|im_end|>' }}
118
+ {%- elif message.role == "user" or message.role == "system"%}
119
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' }}
120
+ {%- elif message.role == "tool" %}
121
+ {%- if loop.previtem and loop.previtem.role != "tool" %}
122
+ {{- '<|im_start|>tool\n' }}
123
+ {%- endif %}
124
+ {{- '<tool_response>\n' }}
125
+ {{- message.content }}
126
+ {{- '\n</tool_response>\n' }}
127
+ {%- if not loop.last and loop.nextitem.role != "tool" %}
128
+ {{- '<|im_end|>' }}
129
+ {%- elif loop.last %}
130
+ {{- '<|im_end|>' }}
131
+ {%- endif %}
132
+ {%- else %}
133
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' }}
134
+ {%- endif %}
135
+ {%- endfor %}
136
+ {%- if add_generation_prompt %}
137
+ {{- '<|im_start|>assistant\n' }}
138
+ {%- if not enable_thinking -%}
139
+ {{- '<think></think>' -}}
140
+ {%- else -%}
141
+ {{- '' -}}
142
+ {%- endif -%}
143
+ {%- endif %}
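Note: when thinking is enabled, assistant replies may contain a `<think>…</think>` block that the template strips from older turns (see the `reasoning_content` handling above). A minimal sketch of the same split in plain Python, useful for post-processing generations; this is ordinary string handling, not an API from this repo:

```python
def split_reasoning(text: str) -> tuple[str, str]:
    """Split a completion into (reasoning, answer), mirroring how the chat
    template separates reasoning_content from content on '</think>'."""
    if "</think>" in text:
        reasoning = text.split("</think>")[0].split("<think>")[-1]
        answer = text.split("</think>")[-1]
        return reasoning.strip(), answer.strip()
    return "", text.strip()


reasoning, answer = split_reasoning("<think>Check the units first.</think>42")
print(reasoning)  # Check the units first.
print(answer)     # 42
```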
config.json ADDED
@@ -0,0 +1,162 @@
1
+ {
2
+ "add_full_attention_sink_bias": false,
3
+ "add_swa_attention_sink_bias": true,
4
+ "architectures": [
5
+ "MiMoV2FlashForCausalLM"
6
+ ],
7
+ "attention_bias": false,
8
+ "attention_chunk_size": 128,
9
+ "attention_dropout": 0.0,
10
+ "attention_value_scale": 0.707,
11
+ "auto_map": {
12
+ "AutoConfig": "configuration_mimo_v2_flash.MiMoV2FlashConfig",
13
+ "AutoModel": "modeling_mimo_v2_flash.MiMoV2FlashModel",
14
+ "AutoModelForCausalLM": "modeling_mimo_v2_flash.MiMoV2FlashForCausalLM"
15
+ },
16
+ "head_dim": 192,
17
+ "hidden_act": "silu",
18
+ "hidden_size": 4096,
19
+ "hybrid_layer_pattern": [
20
+ 0,
21
+ 1,
22
+ 1,
23
+ 1,
24
+ 1,
25
+ 0,
26
+ 1,
27
+ 1,
28
+ 1,
29
+ 1,
30
+ 1,
31
+ 0,
32
+ 1,
33
+ 1,
34
+ 1,
35
+ 1,
36
+ 1,
37
+ 0,
38
+ 1,
39
+ 1,
40
+ 1,
41
+ 1,
42
+ 1,
43
+ 0,
44
+ 1,
45
+ 1,
46
+ 1,
47
+ 1,
48
+ 1,
49
+ 0,
50
+ 1,
51
+ 1,
52
+ 1,
53
+ 1,
54
+ 1,
55
+ 0,
56
+ 1,
57
+ 1,
58
+ 1,
59
+ 1,
60
+ 1,
61
+ 0,
62
+ 1,
63
+ 1,
64
+ 1,
65
+ 1,
66
+ 1,
67
+ 0
68
+ ],
69
+ "initializer_range": 0.02,
70
+ "intermediate_size": 16384,
71
+ "layernorm_epsilon": 1e-05,
72
+ "max_position_embeddings": 262144,
73
+ "model_type": "mimo_v2_flash",
74
+ "moe_intermediate_size": 2048,
75
+ "moe_layer_freq": [
76
+ 0,
77
+ 1,
78
+ 1,
79
+ 1,
80
+ 1,
81
+ 1,
82
+ 1,
83
+ 1,
84
+ 1,
85
+ 1,
86
+ 1,
87
+ 1,
88
+ 1,
89
+ 1,
90
+ 1,
91
+ 1,
92
+ 1,
93
+ 1,
94
+ 1,
95
+ 1,
96
+ 1,
97
+ 1,
98
+ 1,
99
+ 1,
100
+ 1,
101
+ 1,
102
+ 1,
103
+ 1,
104
+ 1,
105
+ 1,
106
+ 1,
107
+ 1,
108
+ 1,
109
+ 1,
110
+ 1,
111
+ 1,
112
+ 1,
113
+ 1,
114
+ 1,
115
+ 1,
116
+ 1,
117
+ 1,
118
+ 1,
119
+ 1,
120
+ 1,
121
+ 1,
122
+ 1,
123
+ 1
124
+ ],
125
+ "n_group": 1,
126
+ "n_routed_experts": 256,
127
+ "n_shared_experts": null,
128
+ "norm_topk_prob": true,
129
+ "num_attention_heads": 64,
130
+ "num_experts_per_tok": 8,
131
+ "num_hidden_layers": 48,
132
+ "num_key_value_heads": 4,
133
+ "partial_rotary_factor": 0.334,
134
+ "quantization": {
135
+ "group_size": 64,
136
+ "bits": 4,
137
+ "mode": "affine"
138
+ },
139
+ "quantization_config": {
140
+ "group_size": 64,
141
+ "bits": 4,
142
+ "mode": "affine"
143
+ },
144
+ "rope_theta": 5000000,
145
+ "routed_scaling_factor": null,
146
+ "scoring_func": "sigmoid",
147
+ "sliding_window": 128,
148
+ "sliding_window_size": 128,
149
+ "swa_head_dim": 192,
150
+ "swa_num_attention_heads": 64,
151
+ "swa_num_key_value_heads": 8,
152
+ "swa_rope_theta": 10000,
153
+ "swa_v_head_dim": 128,
154
+ "tie_word_embeddings": false,
155
+ "topk_group": 1,
156
+ "topk_method": "noaux_tc",
157
+ "torch_dtype": "bfloat16",
158
+ "transformers_version": "4.40.1",
159
+ "use_cache": true,
160
+ "v_head_dim": 128,
161
+ "vocab_size": 152576
162
+ }
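Note: a quick way to read `hybrid_layer_pattern` above — in `modeling_mimo_v2_flash.py` later in this commit, a `1` selects sliding-window attention (window 128, `swa_rope_theta` 10000) and a `0` selects full attention. A small sketch, assuming `config.json` from this repo is in the working directory:

```python
import json

# 1 = sliding-window attention layer, 0 = full-attention layer.
with open("config.json") as f:
    pattern = json.load(f)["hybrid_layer_pattern"]

layer_types = ["sliding_attention" if p == 1 else "full_attention" for p in pattern]
full_layers = [i for i, p in enumerate(pattern) if p == 0]
print(f"{len(full_layers)} of {len(pattern)} layers use full attention: {full_layers}")
```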
configuration_mimo_v2_flash.py ADDED
@@ -0,0 +1,109 @@
1
+ # coding=utf-8
2
+ #
3
+ # Copyright 2025 Xiaomi Corporation.
4
+ # Copyright 2025 The HuggingFace Inc. team.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ from transformers.configuration_utils import PretrainedConfig
19
+ from transformers.modeling_rope_utils import rope_config_validation
20
+ from transformers.utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class MiMoV2FlashConfig(PretrainedConfig):
27
+
28
+ model_type = ""
29
+ keys_to_ignore_at_inference = ["past_key_values"]
30
+
31
+ # Default tensor parallel plan for base model `Hybrid`
32
+ base_model_tp_plan = {
33
+ "layers.*.self_attn.q_proj": "colwise",
34
+ "layers.*.self_attn.k_proj": "colwise",
35
+ "layers.*.self_attn.v_proj": "colwise",
36
+ "layers.*.self_attn.o_proj": "rowwise",
37
+ "layers.*.mlp.gate_proj": "colwise",
38
+ "layers.*.mlp.up_proj": "colwise",
39
+ "layers.*.mlp.down_proj": "rowwise",
40
+ }
41
+ base_model_pp_plan = {
42
+ "embed_tokens": (["input_ids"], ["inputs_embeds"]),
43
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
44
+ "norm": (["hidden_states"], ["hidden_states"]),
45
+ }
46
+
47
+ attribute_map = {
48
+ "num_local_experts": "n_routed_experts",
49
+ }
50
+
51
+ def __init__(
52
+ self,
53
+ vocab_size=151936,
54
+ hidden_size=4096,
55
+ intermediate_size=22016,
56
+ num_hidden_layers=32,
57
+ num_attention_heads=32,
58
+ num_key_value_heads=32,
59
+ hidden_act="silu",
60
+ max_position_embeddings=32768,
61
+ initializer_range=0.02,
62
+ layernorm_epsilon=1e-6,
63
+ use_cache=True,
64
+ tie_word_embeddings=False,
65
+ rope_theta=10000.0,
66
+ rope_scaling=None,
67
+ attention_dropout=0.0,
68
+ hybrid_block_size=None,
69
+ hybrid_layer_pattern=None,
70
+ partial_rotary_factor=1.0,
71
+ **kwargs,
72
+ ):
73
+ self.vocab_size = vocab_size
74
+ self.max_position_embeddings = max_position_embeddings
75
+ self.hidden_size = hidden_size
76
+ self.intermediate_size = intermediate_size
77
+ self.num_hidden_layers = num_hidden_layers
78
+ self.num_attention_heads = num_attention_heads
79
+
80
+ # for backward compatibility
81
+ if num_key_value_heads is None:
82
+ num_key_value_heads = num_attention_heads
83
+
84
+ self.num_key_value_heads = num_key_value_heads
85
+ self.hidden_act = hidden_act
86
+ self.initializer_range = initializer_range
87
+ self.layernorm_epsilon = layernorm_epsilon
88
+ self.use_cache = use_cache
89
+ self.rope_theta = rope_theta
90
+ self.rope_scaling = rope_scaling
91
+ self.attention_dropout = attention_dropout
92
+
93
+ if hybrid_block_size is not None and hybrid_layer_pattern is None:
94
+ hybrid_layer_pattern = [0 if ((i + 1) % hybrid_block_size == 0) else 1 for i in range(num_hidden_layers)]
95
+ self.hybrid_block_size = hybrid_block_size
96
+ self.hybrid_layer_pattern = hybrid_layer_pattern
97
+
98
+ self.partial_rotary_factor = partial_rotary_factor
99
+
100
+ # Validate the correctness of rotary position embeddings parameters
101
+ # BC: if there is a 'type' field, move it to 'rope_type'.
102
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
103
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
104
+ rope_config_validation(self)
105
+
106
+ super().__init__(
107
+ tie_word_embeddings=tie_word_embeddings,
108
+ **kwargs,
109
+ )
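Note: the `hybrid_block_size` branch in `__init__` above is a fallback — when no explicit `hybrid_layer_pattern` is given, every `hybrid_block_size`-th layer becomes a full-attention layer. A tiny sketch of that expression with illustrative numbers (the shipped `config.json` sets the pattern explicitly instead):

```python
# Mirrors the fallback in MiMoV2FlashConfig.__init__:
# 0 = full attention, 1 = sliding-window attention.
num_hidden_layers = 12   # illustrative, not the shipped value
hybrid_block_size = 4    # illustrative, not the shipped value
pattern = [0 if ((i + 1) % hybrid_block_size == 0) else 1 for i in range(num_hidden_layers)]
print(pattern)  # [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0]
```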
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d64f0bcb4b7298f32944c8c521e3b51f690d904735484216b587c94bd4b189d
3
+ size 5320844044
model-00002-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1d37c9d06a7a7318b59354b80bc48c602d6f4ef13eb365f23cb7c4fb36213ab
3
+ size 5021258542
model-00003-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28bf1510758e8d0dc8afa7ec07bae1018eb167b118478ee1908c93486bb91a17
3
+ size 4939291946
model-00004-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:694c72f72691ec8bb30f9fe326cf244c2b51ee5d88d7ddcae4554ce2aa3810fa
3
+ size 4887040562
model-00005-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d39002147ac3d26c8bf3de27f93f9ef6c7e7cce5e5da5e448c6e6bd8e957648
3
+ size 4887040556
model-00006-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:60e345155b2f1c2e1f2fa07cc4b6fc4ede3780b4570a3ed187b3b3f42b7d938b
3
+ size 4942241318
model-00007-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b3f8b70ab9850091df54d84c132b37eb3729927a35c7ef7a43fa44fd6508dd3
3
+ size 4887040578
model-00008-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29813fbc5aa8a1bf2b8ba9d838372ef1127a14614205f1f2d7307c4d0b0b715b
3
+ size 4884091214
model-00009-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a2ee12fd043b764e67f7546d19ab1f49118c2cffb9ee9b06d1db55ef34c5639
3
+ size 4942241362
model-00010-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b570d03d51a3029194f68c1254e4302c414554cec08b0213313556292b8c7766
3
+ size 4887040591
model-00011-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f004f6c2e1b3350e02d1ba61c8fd81f677c2185651dd6317a994e78a9912eb58
3
+ size 4887040581
model-00012-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a78cd0ae7f019c3daad0b914f59907b8f006dc869350f49f327185e8503c15f6
3
+ size 4939292009
model-00013-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b07c1a1418e15249229ccc0382f4350344187237903214d703d5904690e6712
3
+ size 4887040595
model-00014-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6818bddfa4f3be83a68f4a1f1313a77e5c3fd8af0cb0585bd2d15815f7c665e
3
+ size 4887040585
model-00015-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:49ff9c946249d1254a7110c441921403641274586e0c2abd3b0f947177989d25
3
+ size 4942241360
model-00016-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b14d65687983dbc9e77b48307a3bc38f15ba82997c53ae03a523e354a3fb647a
3
+ size 4887040595
model-00017-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efc0faa5a26504e03e27e0f456d3d006b23aae1c85db9e9bd5c0beee131470fe
3
+ size 4884091214
model-00018-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3354229f006678f4cd87a70ba370150c522e710945beb4afb4b18a1065153a1d
3
+ size 4942241390
model-00019-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f2eae97e471002be024024ceb43e53e2f88c157547b4b37893d96a3061c05c6
3
+ size 4887040575
model-00020-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03536c8a12e011a5768b0e77e5ba6364f4a2c34b189400f7dd5e2988c5179038
3
+ size 4887040585
model-00021-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:49b3da487ea0880fd36f6876e114a8c6ece57fb2b929c63596603e55dbdc3eb3
3
+ size 4939292019
model-00022-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:40e42dc7fdff86ae979a881e22f7b98435221c9ad426f9814544b717301fda07
3
+ size 4887040541
model-00023-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af4e74e00c4bcc22ceae21fdd0b1c74c39c7efa2479eac3268f32d195ba36970
3
+ size 4887040583
model-00024-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6a6858bf57bcf4364d6e5682664f09e7b8dd76a2135b3f9ac3117df64bd9e4d
3
+ size 4942241362
model-00025-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7dc54db351e74289a4ae52738da5ca9419c3bc023cd48952777d47c85b638804
3
+ size 4887040561
model-00026-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d46004e324c10b0a6df64c2eae423ff1041aa4e50900996bd1689b38b3b0e4ac
3
+ size 4884091212
model-00027-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0529a15c9ee534272580a19fc2c567429c27ccffbf04396eb14f24eebd3b600
3
+ size 4942241388
model-00028-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bef9ae9bf04ccd2433572a01dd379546030742d11f7edc9ba66d241b8834037f
3
+ size 4887040595
model-00029-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f744f9bc6cac550baa60eacca815c7021e8760b80b8d7af186c48d2ece279b61
3
+ size 4887040589
model-00030-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f008609f40434c3ee39a49e23359e5b48bd7afa04faf5a2d4e9009bd1a2c3351
3
+ size 4939292007
model-00031-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:263d57aa64b265a0051a78705ace72c5043eb41afc09efc7d67101713460459f
3
+ size 4887040591
model-00032-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d64e6236ad39d37c0313b9bccf53bfc1da5ede5c4ca28e90f12c4399894e68c9
3
+ size 4887040565
model-00033-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:488df95959aa06b1f18e31b67d48ba65b218d7907627e38f275a093e13524491
3
+ size 4942241378
model-00034-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16a05cb9d6d166cce6293634fd4f6c7c43548cfecf711b7cc4832fc3ec198c33
3
+ size 4887040595
model-00035-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23e555dd9cf2235b36c1ea4bf3ba5089c18ce6c5fac7d54860cf8af993e7d3b9
3
+ size 4884091208
model-00036-of-00036.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d097dce67a4787dc01546642be0712058dccc894ac337e8c19aecca2b1f83e4a
3
+ size 1561618630
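Note: each `model-*.safetensors` entry above is a Git LFS pointer stub (`version`, `oid`, `size`). A small sketch that sums the `size` fields to estimate the total download, assuming the files on disk are still pointer stubs rather than the resolved binaries:

```python
import glob
import re

paths = sorted(glob.glob("model-*-of-00036.safetensors"))
total = 0
for path in paths:
    with open(path) as f:              # works only on unresolved LFS pointer stubs
        text = f.read()
    total += int(re.search(r"^size (\d+)$", text, re.M).group(1))

print(f"{len(paths)} shards, about {total / 1e9:.1f} GB of weights")
```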
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_mimo_v2_flash.py ADDED
@@ -0,0 +1,664 @@
1
+ # coding=utf-8
2
+ #
3
+ # Copyright 2025 Xiaomi Corporation.
4
+ # Copyright 2025 The HuggingFace Inc. team.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ from typing import Callable, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.nn.functional as F
23
+
24
+ from transformers.generation import GenerationMixin
25
+ from transformers.activations import ACT2FN
26
+ from transformers.cache_utils import Cache, DynamicCache
27
+ from transformers.integrations import use_kernel_forward_from_hub
28
+
29
+ from transformers.modeling_outputs import (
30
+ BaseModelOutputWithPast,
31
+ CausalLMOutputWithPast,
32
+ )
33
+
34
+ from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
35
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
36
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
37
+ from transformers.processing_utils import Unpack
38
+ from transformers.utils import (
39
+ logging,
40
+ )
41
+
42
+ from transformers.modeling_outputs import MoeModelOutputWithPast
43
+ from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
44
+ from .configuration_mimo_v2_flash import MiMoV2FlashConfig
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+
49
+ def rotate_half(x):
50
+ """Rotates half the hidden dims of the input."""
51
+ x1 = x[..., : x.shape[-1] // 2]
52
+ x2 = x[..., x.shape[-1] // 2:]
53
+ return torch.cat((-x2, x1), dim=-1)
54
+
55
+
56
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
57
+ """Applies Rotary Position Embedding to the query and key tensors.
58
+
59
+ Args:
60
+ q (`torch.Tensor`): The query tensor.
61
+ k (`torch.Tensor`): The key tensor.
62
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
63
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
64
+ position_ids (`torch.Tensor`, *optional*):
65
+ Deprecated and unused.
66
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
67
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
68
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
69
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
70
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
71
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
72
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
73
+ Returns:
74
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
75
+ """
76
+ cos = cos.unsqueeze(unsqueeze_dim)
77
+ sin = sin.unsqueeze(unsqueeze_dim)
78
+ q_embed = (q * cos) + (rotate_half(q) * sin)
79
+ k_embed = (k * cos) + (rotate_half(k) * sin)
80
+ return q_embed, k_embed
81
+
82
+
83
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
84
+ """
85
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
86
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
87
+ """
88
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
89
+ if n_rep == 1:
90
+ return hidden_states
91
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
92
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
93
+
94
+
95
+ def eager_attention_forward(
96
+ module: nn.Module,
97
+ query: torch.Tensor,
98
+ key: torch.Tensor,
99
+ value: torch.Tensor,
100
+ attention_mask: Optional[torch.Tensor],
101
+ scaling: float,
102
+ dropout: float = 0.0,
103
+ sinks: Optional[torch.Tensor] = None,
104
+ ):
105
+ key_states = repeat_kv(key, module.num_key_value_groups)
106
+ value_states = repeat_kv(value, module.num_key_value_groups)
107
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
108
+ if attention_mask is not None:
109
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
110
+ attn_weights = attn_weights + causal_mask
111
+
112
+ if sinks is not None:
113
+ sinks = module.attention_sink_bias.reshape(1, -1, 1, 1).expand(query.shape[0], -1, query.shape[-2], -1)
114
+ attn_weights = torch.cat([attn_weights, sinks], dim=-1)
115
+
116
+ attn_weights = attn_weights - attn_weights.max(dim=-1, keepdim=True).values
117
+ probs = F.softmax(attn_weights, dim=-1, dtype=attn_weights.dtype)
118
+
119
+ if sinks is not None:
120
+ probs = probs[..., :-1] # we drop the sink here
121
+
122
+ attn_weights = nn.functional.dropout(probs, p=dropout, training=module.training)
123
+ attn_output = torch.matmul(attn_weights, value_states)
124
+ attn_output = attn_output.transpose(1, 2).contiguous()
125
+ return attn_output, attn_weights
126
+
127
+
128
+ @use_kernel_forward_from_hub("RMSNorm")
129
+ class MiMoV2RMSNorm(nn.Module):
130
+ def __init__(self, hidden_size, eps=1e-6):
131
+ """
132
+ MiMoV2RMSNorm is equivalent to T5LayerNorm
133
+ """
134
+ super().__init__()
135
+ self.weight = nn.Parameter(torch.ones(hidden_size))
136
+ self.variance_epsilon = eps
137
+
138
+ def forward(self, hidden_states):
139
+ input_dtype = hidden_states.dtype
140
+ hidden_states = hidden_states.to(torch.float32)
141
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
142
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
143
+ return self.weight * hidden_states.to(input_dtype)
144
+
145
+
146
+ class MiMoV2MLP(nn.Module):
147
+ """MiMoV2MLP matching the gate, up, and down projection layers."""
148
+
149
+ def __init__(self, config: MiMoV2FlashConfig, intermediate_size=None):
150
+ super().__init__()
151
+ self.config = config
152
+ self.hidden_size = config.hidden_size
153
+ self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
154
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
155
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
156
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
157
+ self.act_fn = ACT2FN[config.hidden_act]
158
+
159
+ def forward(self, hidden_states):
160
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(hidden_states)) * self.up_proj(hidden_states))
161
+ return down_proj
162
+
163
+
164
+ class MiMoV2MoEGate(nn.Module):
165
+ def __init__(self, config):
166
+ super().__init__()
167
+ self.config = config
168
+ self.top_k = config.num_experts_per_tok
169
+ self.n_routed_experts = config.n_routed_experts
170
+ self.routed_scaling_factor = (
171
+ config.routed_scaling_factor
172
+ if config.routed_scaling_factor is not None
173
+ else 1.0
174
+ )
175
+ self.scoring_func = config.scoring_func
176
+ self.topk_method = config.topk_method
177
+ self.n_group = config.n_group
178
+ self.topk_group = config.topk_group
179
+
180
+ # topk selection algorithm
181
+ self.norm_topk_prob = config.norm_topk_prob
182
+ self.gating_dim = config.hidden_size
183
+ self.weight = nn.Parameter(
184
+ torch.empty((self.n_routed_experts, self.gating_dim))
185
+ )
186
+ if self.topk_method == "noaux_tc":
187
+ self.e_score_correction_bias = nn.Parameter(
188
+ torch.empty((self.n_routed_experts))
189
+ )
190
+
191
+ def forward(self, hidden_states):
192
+ bsz, seq_len, h = hidden_states.shape
193
+ ### compute gating score
194
+ hidden_states = hidden_states.view(-1, h)
195
+ logits = F.linear(
196
+ hidden_states.type(torch.float32), self.weight.type(torch.float32), None
197
+ )
198
+ if self.scoring_func == "sigmoid":
199
+ scores = logits.sigmoid()
200
+ else:
201
+ raise NotImplementedError(
202
+ f"insupportable scoring function for MoE gating: {self.scoring_func}"
203
+ )
204
+
205
+ ### select top-k experts
206
+ if self.topk_method == "noaux_tc":
207
+ assert not self.training
208
+ scores_for_choice = scores.view(bsz * seq_len, -1) + self.e_score_correction_bias.unsqueeze(0)
209
+ group_scores = (
210
+ scores_for_choice.view(bsz * seq_len, self.n_group, -1).topk(2, dim=-1)[0].sum(dim = -1)
211
+ ) # [n, n_group]
212
+ group_idx = torch.topk(
213
+ group_scores, k=self.topk_group, dim=-1, sorted=False
214
+ )[
215
+ 1
216
+ ] # [n, top_k_group]
217
+ group_mask = torch.zeros_like(group_scores) # [n, n_group]
218
+ group_mask.scatter_(1, group_idx, 1) # [n, n_group]
219
+ score_mask = (
220
+ group_mask.unsqueeze(-1)
221
+ .expand(
222
+ bsz * seq_len, self.n_group, self.n_routed_experts // self.n_group
223
+ )
224
+ .reshape(bsz * seq_len, -1)
225
+ ) # [n, e]
226
+ tmp_scores = scores_for_choice.masked_fill(~score_mask.bool(), float("-inf")) # [n, e]
227
+ _, topk_idx = torch.topk(
228
+ tmp_scores, k=self.top_k, dim=-1, sorted=False
229
+ )
230
+ topk_weight = scores.gather(1, topk_idx)
231
+ else:
232
+ raise NotImplementedError(
233
+ f"insupportable TopK function for MoE gating: {self.topk_method}"
234
+ )
235
+
236
+ ### norm gate to sum 1
237
+ if self.top_k > 1 and self.norm_topk_prob:
238
+ denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
239
+ topk_weight = topk_weight / denominator
240
+ topk_weight = topk_weight * self.routed_scaling_factor # must multiply the scaling factor
241
+
242
+ return topk_idx, topk_weight
243
+
244
+
245
+ class MiMoV2MoE(nn.Module):
246
+ """
247
+ A mixed expert module containing shared experts.
248
+ """
249
+
250
+ def __init__(self, config):
251
+ super().__init__()
252
+ self.config = config
253
+ self.experts = nn.ModuleList(
254
+ [
255
+ MiMoV2MLP(config, intermediate_size=config.moe_intermediate_size)
256
+ for _ in range(config.n_routed_experts)
257
+ ]
258
+ )
259
+ self.gate = MiMoV2MoEGate(config)
260
+
261
+ def moe(self, hidden_states: torch.Tensor, topk_indices: torch.Tensor, topk_weights: torch.Tensor):
262
+ r"""
263
+ CALL FOR CONTRIBUTION! I don't have time to optimise this right now, but expert weights need to be fused
264
+ to not have to do a loop here (deepseek has 256 experts soooo yeah).
265
+ """
266
+ final_hidden_states = torch.zeros_like(hidden_states, dtype=topk_weights.dtype)
267
+ expert_mask = torch.nn.functional.one_hot(topk_indices, num_classes=len(self.experts))
268
+ expert_mask = expert_mask.permute(2, 0, 1)
269
+
270
+ for expert_idx in range(len(self.experts)):
271
+ expert = self.experts[expert_idx]
272
+ mask = expert_mask[expert_idx]
273
+ token_indices, weight_indices = torch.where(mask)
274
+
275
+ if token_indices.numel() > 0:
276
+ expert_weights = topk_weights[token_indices, weight_indices]
277
+ expert_input = hidden_states[token_indices]
278
+ expert_output = expert(expert_input)
279
+ weighted_output = expert_output * expert_weights.unsqueeze(-1)
280
+ final_hidden_states.index_add_(0, token_indices, weighted_output)
281
+
282
+ # in original deepseek, the output of the experts are gathered once we leave this module
283
+ # thus the moe module is itself an IsolatedParallel module
284
+ # and all expert are "local" meaning we shard but we don't gather
285
+ return final_hidden_states.type(hidden_states.dtype)
286
+
287
+
288
+ def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
289
+ orig_shape = hidden_states.shape
290
+ topk_indices, topk_weights = self.gate(hidden_states)
291
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
292
+ hidden_states = self.moe(hidden_states, topk_indices, topk_weights).view(*orig_shape)
293
+
294
+ return hidden_states
295
+
296
+
297
+ class MiMoV2Attention(nn.Module):
298
+ """MiMoV2 Global Attention (pattern == 0) and Sliding Window Attention (pattern == 1)."""
299
+
300
+ def __init__(self, config: MiMoV2FlashConfig, is_swa: bool, layer_idx: int):
301
+ super().__init__()
302
+ self.config = config
303
+ self.layer_idx = layer_idx
304
+
305
+ if is_swa:
306
+ self.head_dim = config.swa_head_dim
307
+ self.v_head_dim = config.swa_v_head_dim
308
+ self.num_attention_heads = config.swa_num_attention_heads
309
+ self.num_key_value_heads = config.swa_num_key_value_heads
310
+ else:
311
+ self.head_dim = config.head_dim
312
+ self.v_head_dim = config.v_head_dim
313
+ self.num_attention_heads = config.num_attention_heads
314
+ self.num_key_value_heads = config.num_key_value_heads
315
+
316
+ self.rope_dim = int(self.head_dim * config.partial_rotary_factor)
317
+ self.num_key_value_groups = self.num_attention_heads // self.num_key_value_heads
318
+ self.attention_bias = config.attention_bias
319
+ self.attention_dropout: float = config.attention_dropout
320
+ self.scaling = self.head_dim ** -0.5
321
+
322
+ # These dimensions are for the attention layers
323
+ q_hidden_size = self.num_attention_heads * self.head_dim
324
+ k_hidden_size = self.num_key_value_heads * self.head_dim
325
+ v_hidden_size = self.num_key_value_heads * self.v_head_dim
326
+ o_hidden_size = self.num_attention_heads * self.v_head_dim
327
+
328
+ self.q_proj = nn.Linear(config.hidden_size, q_hidden_size, bias=self.attention_bias)
329
+ self.k_proj = nn.Linear(config.hidden_size, k_hidden_size, bias=self.attention_bias)
330
+ self.v_proj = nn.Linear(config.hidden_size, v_hidden_size, bias=self.attention_bias)
331
+ self.o_proj = nn.Linear(o_hidden_size, config.hidden_size, bias=False)
332
+
333
+ self.attention_sink_bias = (
334
+ torch.nn.Parameter(torch.empty(config.num_attention_heads), requires_grad=False)
335
+ if (config.add_full_attention_sink_bias and not is_swa) or (config.add_swa_attention_sink_bias and is_swa)
336
+ else None
337
+ )
338
+
339
+ def forward(
340
+ self,
341
+ hidden_states: torch.Tensor,
342
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
343
+ attention_mask: Optional[torch.Tensor],
344
+ past_key_values: Optional[Cache] = None,
345
+ cache_position: Optional[torch.LongTensor] = None,
346
+ position_ids: Optional[torch.LongTensor] = None,
347
+ **kwargs: Unpack[TransformersKwargs],
348
+ ) -> tuple[torch.Tensor, torch.Tensor]:
349
+ input_shape = hidden_states.shape[:-1]
350
+ qk_hidden_shape = (*input_shape, -1, self.head_dim)
351
+ v_hidden_shape = (*input_shape, -1, self.v_head_dim)
352
+
353
+ query_states = self.q_proj(hidden_states).view(qk_hidden_shape).transpose(1, 2)
354
+ key_states = self.k_proj(hidden_states).view(qk_hidden_shape).transpose(1, 2)
355
+ value_states = self.v_proj(hidden_states).view(v_hidden_shape).transpose(1, 2)
356
+
357
+ cos, sin = position_embeddings
358
+
359
+ query_rope, query_nope = query_states.split([self.rope_dim, self.head_dim - self.rope_dim], dim=-1)
360
+ key_rope, key_nope = key_states.split([self.rope_dim, self.head_dim - self.rope_dim], dim=-1)
361
+
362
+ query_rope, key_rope = apply_rotary_pos_emb(query_rope, key_rope, cos, sin)
363
+
364
+ query_states = torch.cat([query_rope, query_nope], dim=-1)
365
+ key_states = torch.cat([key_rope, key_nope], dim=-1)
366
+
367
+ if past_key_values is not None:
368
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
369
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
370
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
371
+
372
+ attention_interface: Callable = eager_attention_forward
373
+ if self.config._attn_implementation != "eager":
374
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
375
+
376
+ attn_output, attn_weights = attention_interface(
377
+ self,
378
+ query_states,
379
+ key_states,
380
+ value_states,
381
+ attention_mask,
382
+ dropout=0.0 if not self.training else self.attention_dropout,
383
+ scaling=self.scaling,
384
+ position_ids=position_ids,
385
+ sinks=self.attention_sink_bias,
386
+ )
387
+
388
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
389
+ attn_output = self.o_proj(attn_output)
390
+ return attn_output, attn_weights
391
+
392
+
393
+ class MiMoV2DecoderLayer(nn.Module):
394
+ """
395
+ MiMoV2 Decoder Layer. It dynamically chooses the correct attention
396
+ module based on the layer index and the `hybrid_layer_pattern`.
397
+ """
398
+
399
+ def __init__(self, config: MiMoV2FlashConfig, layer_idx: int):
400
+ super().__init__()
401
+
402
+ # This is the key logic: choose the module based on the pattern
403
+ is_swa_layer = config.hybrid_layer_pattern[layer_idx] == 1
404
+ if is_swa_layer:
405
+ self.attention_type = "sliding_window_attention"
406
+ self.self_attn = MiMoV2Attention(config, True, layer_idx)
407
+ else:
408
+ self.attention_type = "full_attention"
409
+ self.self_attn = MiMoV2Attention(config, False, layer_idx)
410
+
411
+ self.mlp = (
412
+ MiMoV2MoE(config)
413
+ if (
414
+ getattr(config, 'n_routed_experts', None) is not None
415
+ and config.moe_layer_freq[layer_idx]
416
+ )
417
+ else MiMoV2MLP(config)
418
+ )
419
+
420
+ self.input_layernorm = MiMoV2RMSNorm(config.hidden_size, eps=config.layernorm_epsilon)
421
+ self.post_attention_layernorm = MiMoV2RMSNorm(config.hidden_size, eps=config.layernorm_epsilon)
422
+ self.hidden_size = config.hidden_size
423
+
424
+ def forward(
425
+ self,
426
+ hidden_states: torch.Tensor,
427
+ attention_mask: Optional[torch.Tensor] = None,
428
+ position_ids: Optional[torch.LongTensor] = None,
429
+ past_key_values: Optional[Cache] = None,
430
+ use_cache: Optional[bool] = False,
431
+ cache_position: Optional[torch.LongTensor] = None,
432
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
433
+ **kwargs: Unpack[TransformersKwargs],
434
+ ) -> torch.Tensor:
435
+ residual = hidden_states
436
+ hidden_states = self.input_layernorm(hidden_states)
437
+ # Self Attention
438
+ hidden_states, _ = self.self_attn(
439
+ hidden_states=hidden_states,
440
+ attention_mask=attention_mask,
441
+ position_ids=position_ids,
442
+ past_key_values=past_key_values,
443
+ use_cache=use_cache,
444
+ cache_position=cache_position,
445
+ position_embeddings=position_embeddings,
446
+ **kwargs,
447
+ )
448
+ hidden_states = residual + hidden_states
449
+
450
+ # MLP or MOE
451
+ residual = hidden_states
452
+ hidden_states = self.post_attention_layernorm(hidden_states)
453
+ hidden_states = self.mlp(hidden_states)
454
+ hidden_states = residual + hidden_states
455
+ return hidden_states
456
+
457
+ class MiMoV2FlashRotaryEmbedding(nn.Module):
458
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
459
+
460
+ def __init__(self, config: MiMoV2FlashConfig, is_swa, device=None):
461
+ super().__init__()
462
+ # BC: "rope_type" was originally "type"
463
+ if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
464
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
465
+ else:
466
+ self.rope_type = "default"
467
+ self.max_seq_len_cached = config.max_position_embeddings
468
+ self.original_max_seq_len = config.max_position_embeddings
469
+
470
+ self.config = config
471
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
472
+
473
+ if is_swa:
474
+ self.config.rope_theta = config.swa_rope_theta
475
+ self.config.head_dim = config.swa_head_dim
476
+
477
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
478
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
479
+ self.original_inv_freq = self.inv_freq
480
+
481
+ @torch.no_grad()
482
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
483
+ def forward(self, x, position_ids):
484
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
485
+ position_ids_expanded = position_ids[:, None, :].float()
486
+
487
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
488
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
489
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
490
+ emb = torch.cat((freqs, freqs), dim=-1)
491
+ cos = emb.cos() * self.attention_scaling
492
+ sin = emb.sin() * self.attention_scaling
493
+
494
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
495
+
496
+
497
+ @auto_docstring
498
+ class MiMoV2Model(PreTrainedModel):
499
+ """The main 'model' block, corresponding to `model.` in the weight map."""
500
+ config_class = MiMoV2FlashConfig
501
+
502
+ def __init__(self, config: MiMoV2FlashConfig):
503
+ super().__init__(config)
504
+ self.vocab_size = config.vocab_size
505
+
506
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
507
+ self.layers = nn.ModuleList(
508
+ [MiMoV2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
509
+ )
510
+ self.norm = MiMoV2RMSNorm(config.hidden_size, eps=config.layernorm_epsilon)
511
+ self.rotary_emb = MiMoV2FlashRotaryEmbedding(config=config, is_swa=False)
512
+ self.swa_rotary_emb = MiMoV2FlashRotaryEmbedding(config=config, is_swa=True)
513
+
514
+ self.has_sliding_layers = any(
515
+ pattern == 1 for pattern in config.hybrid_layer_pattern
516
+ )
517
+
518
+ # For Huggingface DynamicCache compatibility
519
+ self.config.layer_types = [
520
+ "sliding_attention" if config.hybrid_layer_pattern[i] == 1 else "full_attention"
521
+ for i in range(config.num_hidden_layers)
522
+ ]
523
+
524
+ @auto_docstring
525
+ def forward(
526
+ self,
527
+ input_ids: Optional[torch.LongTensor] = None,
528
+ attention_mask: Optional[torch.Tensor] = None,
529
+ position_ids: Optional[torch.LongTensor] = None,
530
+ past_key_values: Optional[Cache] = None,
531
+ inputs_embeds: Optional[torch.FloatTensor] = None,
532
+ use_cache: Optional[bool] = None,
533
+ cache_position: Optional[torch.LongTensor] = None,
534
+ **kwargs: Unpack[TransformersKwargs],
535
+ ) -> MoeModelOutputWithPast:
536
+ if (input_ids is None) ^ (inputs_embeds is not None):
537
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
538
+
539
+ if inputs_embeds is None:
540
+ inputs_embeds = self.embed_tokens(input_ids)
541
+
542
+ if use_cache and past_key_values is None:
543
+ past_key_values = DynamicCache(config=self.config)
544
+
545
+ if cache_position is None:
546
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
547
+ cache_position = torch.arange(
548
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
549
+ )
550
+
551
+ if position_ids is None:
552
+ position_ids = cache_position.unsqueeze(0)
553
+
554
+ # It may already have been prepared by e.g. `generate`
555
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
556
+ # Prepare mask arguments
557
+ mask_kwargs = {
558
+ "config": self.config,
559
+ "input_embeds": inputs_embeds,
560
+ "attention_mask": attention_mask,
561
+ "cache_position": cache_position,
562
+ "past_key_values": past_key_values,
563
+ "position_ids": position_ids,
564
+ }
565
+ # Create the masks
566
+ causal_mask_mapping = {
567
+ "full_attention": create_causal_mask(**mask_kwargs),
568
+ }
569
+ # The sliding window alternating layers are not always activated depending on the config
570
+ if self.has_sliding_layers:
571
+ causal_mask_mapping["sliding_window_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
572
+
573
+ hidden_states = inputs_embeds
574
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
575
+ swa_position_embeddings = self.swa_rotary_emb(hidden_states, position_ids)
576
+
577
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
578
+ hidden_states = decoder_layer(
579
+ hidden_states,
580
+ attention_mask=causal_mask_mapping[decoder_layer.attention_type],
581
+ position_embeddings=(
582
+ position_embeddings
583
+ if decoder_layer.attention_type == "full_attention"
584
+ else swa_position_embeddings
585
+ ),
586
+ position_ids=position_ids,
587
+ past_key_values=past_key_values,
588
+ use_cache=use_cache,
589
+ cache_position=cache_position,
590
+ **kwargs,
591
+ )
592
+
593
+ hidden_states = self.norm(hidden_states)
594
+ return BaseModelOutputWithPast(
595
+ last_hidden_state=hidden_states,
596
+ past_key_values=past_key_values if use_cache else None,
597
+ )
598
+
599
+
600
+ @auto_docstring
601
+ class MiMoV2FlashForCausalLM(PreTrainedModel,GenerationMixin):
602
+ _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
603
+ _tp_plan = {"lm_head": "colwise_rep"}
604
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
605
+
606
+ config_class = MiMoV2FlashConfig
607
+ _keys_to_ignore_on_load_unexpected = [r"model.layers\.\d+\.self_attn\.rotary_emb\.inv_freq"]
608
+
609
+ def __init__(self, config: MiMoV2FlashConfig):
610
+ super().__init__(config)
611
+ self.model = MiMoV2Model(config)
612
+ self.vocab_size = config.vocab_size
613
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
614
+
615
+ # Initialize weights and apply final processing
616
+ self.post_init()
617
+
618
+ @can_return_tuple
619
+ @auto_docstring
620
+ def forward(
621
+ self,
622
+ input_ids: Optional[torch.LongTensor] = None,
623
+ attention_mask: Optional[torch.Tensor] = None,
624
+ position_ids: Optional[torch.LongTensor] = None,
625
+ past_key_values: Optional[Cache] = None,
626
+ inputs_embeds: Optional[torch.FloatTensor] = None,
627
+ labels: Optional[torch.LongTensor] = None,
628
+ use_cache: Optional[bool] = None,
629
+ cache_position: Optional[torch.LongTensor] = None,
630
+ logits_to_keep: Union[int, torch.Tensor] = 0,
631
+ **kwargs: Unpack[TransformersKwargs],
632
+ ) -> CausalLMOutputWithPast:
633
+
634
+ outputs: BaseModelOutputWithPast = self.model(
635
+ input_ids=input_ids,
636
+ attention_mask=attention_mask,
637
+ position_ids=position_ids,
638
+ past_key_values=past_key_values,
639
+ inputs_embeds=inputs_embeds,
640
+ use_cache=use_cache,
641
+ cache_position=cache_position,
642
+ **kwargs,
643
+ )
644
+
645
+ hidden_states = outputs.last_hidden_state
646
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
647
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
648
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
649
+
650
+ loss = None
651
+ if labels is not None:
652
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
653
+
654
+ return CausalLMOutputWithPast(
655
+ loss=loss,
656
+ logits=logits,
657
+ past_key_values=outputs.past_key_values,
658
+ hidden_states=outputs.hidden_states,
659
+ attentions=outputs.attentions,
660
+ )
661
+
662
+ __all__ = [
663
+ "MiMoV2FlashForCausalLM"
664
+ ]
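Note: one detail worth calling out in `eager_attention_forward` above — when an attention-sink bias is present (the config enables it for the sliding-window layers via `add_swa_attention_sink_bias`), an extra logit column is appended before the softmax and dropped afterwards, so it only absorbs probability mass. A minimal numeric sketch of that step, assuming PyTorch is installed; shapes are toy-sized:

```python
import torch
import torch.nn.functional as F

# Toy scores: 1 batch, 1 head, 1 query position, 4 key positions.
attn_weights = torch.tensor([[[[0.5, 1.0, -0.3, 0.2]]]])
sink_bias = torch.tensor([2.0])  # one learned scalar per head

# Append the sink column, softmax over keys + sink, then drop the sink column,
# matching the eager attention path above.
sinks = sink_bias.reshape(1, -1, 1, 1).expand(attn_weights.shape[0], -1, attn_weights.shape[-2], -1)
logits = torch.cat([attn_weights, sinks], dim=-1)
logits = logits - logits.max(dim=-1, keepdim=True).values   # numerical stability
probs = F.softmax(logits, dim=-1)[..., :-1]                  # drop the sink

print(probs.sum().item())  # < 1.0: the sink absorbed part of the probability mass
```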
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
3
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|im_end|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 262144,
235
+ "pad_token": "<|endoftext|>",
236
+ "split_special_tokens": false,
237
+ "tokenizer_class": "Qwen2Tokenizer",
238
+ "unk_token": null
239
+ }
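Note: a short sanity-check sketch for the tokenizer settings above (Qwen2-style tokenizer, `<|im_end|>` as EOS, `<|endoftext|>` as padding, 262144 max length), assuming the `transformers` library and this repo id; the loaded class is usually the fast variant:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mlx-community/MiMo-V2-Flash-4bit")

print(type(tok).__name__)                      # typically Qwen2TokenizerFast
print(tok.eos_token)                           # <|im_end|>
print(tok.pad_token)                           # <|endoftext|>
print(tok.model_max_length)                    # 262144
print(tok.convert_tokens_to_ids("<think>"))    # 151667, per added_tokens.json
```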
vocab.json ADDED
The diff for this file is too large to render. See raw diff