Nidum.AI
committed on
Commit · 4aa5f33
0 Parent(s):
Initial upload for MiniMax-M2-THRIFT-55-v1 (sharded)
Files changed:
- .gitattributes +5 -0
- added_tokens.json +56 -0
- chat_template.jinja +159 -0
- config.json +120 -0
- configuration_minimax_m2.py +131 -0
- merges.txt +3 -0
- model-00001-of-00020.safetensors +3 -0
- model-00002-of-00020.safetensors +3 -0
- model-00003-of-00020.safetensors +3 -0
- model-00004-of-00020.safetensors +3 -0
- model-00005-of-00020.safetensors +3 -0
- model-00006-of-00020.safetensors +3 -0
- model-00007-of-00020.safetensors +3 -0
- model-00008-of-00020.safetensors +3 -0
- model-00009-of-00020.safetensors +3 -0
- model-00010-of-00020.safetensors +3 -0
- model-00011-of-00020.safetensors +3 -0
- model-00012-of-00020.safetensors +3 -0
- model-00013-of-00020.safetensors +3 -0
- model-00014-of-00020.safetensors +3 -0
- model-00015-of-00020.safetensors +3 -0
- model-00016-of-00020.safetensors +3 -0
- model-00017-of-00020.safetensors +3 -0
- model-00018-of-00020.safetensors +3 -0
- model-00019-of-00020.safetensors +3 -0
- model-00020-of-00020.safetensors +3 -0
- model.safetensors.index.json +0 -0
- special_tokens_map.json +75 -0
- tokenizer.json +3 -0
- tokenizer_config.json +3 -0
- vocab.json +3 -0
.gitattributes
ADDED
@@ -0,0 +1,5 @@
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+tokenizer_config.json filter=lfs diff=lfs merge=lfs -text
+vocab.json filter=lfs diff=lfs merge=lfs -text
+merges.txt filter=lfs diff=lfs merge=lfs -text
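
These five patterns route every large artifact in this commit through Git LFS, so a plain git clone without a follow-up git lfs pull leaves only three-line pointer stubs on disk (exactly the stubs shown for the .safetensors and tokenizer files further down). A minimal, illustrative sketch for detecting that situation; the helper name is ours, not part of this repo:

# Hypothetical helper: detect whether a checked-out file is still a Git LFS
# pointer stub rather than the real payload.
from pathlib import Path

LFS_SPEC = "version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    p = Path(path)
    if not p.is_file() or p.stat().st_size > 1024:  # real pointer stubs are tiny
        return False
    lines = p.read_text(encoding="utf-8", errors="ignore").splitlines()
    return bool(lines) and lines[0].startswith(LFS_SPEC)

if __name__ == "__main__":
    # Prints True until git lfs pull replaces the stub with the real shard.
    print(is_lfs_pointer("model-00001-of-00020.safetensors"))
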
added_tokens.json
ADDED
@@ -0,0 +1,56 @@
+{
+  "</minimax:tool_call>": 200053,
+  "</think>": 200051,
+  "<add_file>": 200036,
+  "<code_context>": 200043,
+  "<code_interpreter>": 200023,
+  "<commit_after>": 200018,
+  "<commit_before>": 200016,
+  "<commit_message>": 200040,
+  "<commit_msg>": 200017,
+  "<delete_file>": 200037,
+  "<edit_file>": 200039,
+  "<empty_output>": 200015,
+  "<empty_source_file>": 200041,
+  "<file_content>": 200044,
+  "<file_sep>": 200049,
+  "<filename>": 200006,
+  "<filepath>": 200048,
+  "<fim_middle>": 200002,
+  "<fim_pad>": 200004,
+  "<fim_prefix>": 200001,
+  "<fim_suffix>": 200003,
+  "<function_call>": 200022,
+  "<gh_stars>": 200007,
+  "<issue_closed>": 200010,
+  "<issue_comment>": 200009,
+  "<issue_start>": 200008,
+  "<jupyter_code>": 200013,
+  "<jupyter_error>": 200035,
+  "<jupyter_output>": 200014,
+  "<jupyter_start>": 200011,
+  "<jupyter_text>": 200012,
+  "<minimax:tool_call>": 200052,
+  "<pr_start>": 200046,
+  "<rename_file>": 200038,
+  "<repo_struct>": 200042,
+  "<reponame>": 200005,
+  "<review_comment>": 200047,
+  "<source_files>": 200045,
+  "<think>": 200050,
+  "[e~[": 200020,
+  "]!d~[": 200021,
+  "]!p~[": 200000,
+  "]<]end of image[>[": 200030,
+  "]<]end of speech[>[": 200028,
+  "]<]end of video[>[": 200032,
+  "]<]image[>[": 200025,
+  "]<]speech[>[": 200024,
+  "]<]start of image[>[": 200029,
+  "]<]start of speech[>[": 200027,
+  "]<]start of video[>[": 200031,
+  "]<]video[>[": 200026,
+  "]<]vision pad[>[": 200033,
+  "]~!b[": 200034,
+  "]~b]": 200019
+}
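
added_tokens.json pins the chat, tool-calling, FIM, and multimodal markers to fixed IDs in the 200000-200053 range, just under the vocab_size of 200064 declared in config.json below. An illustrative sketch of reading the file directly (paths assume the repository root as the working directory); once the LFS-tracked tokenizer files are pulled, the same mapping is also exposed via AutoTokenizer's get_added_vocab():

import json

with open("added_tokens.json", encoding="utf-8") as f:
    added_tokens = json.load(f)

# Look up a few of the markers used by the chat template further down.
print(added_tokens["<think>"])              # 200050
print(added_tokens["</think>"])             # 200051
print(added_tokens["<minimax:tool_call>"])  # 200052

# All added IDs sit just below vocab_size = 200064 from config.json.
print(min(added_tokens.values()), max(added_tokens.values()))  # 200000 200053
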
chat_template.jinja
ADDED
@@ -0,0 +1,159 @@
+{# ---------- special token variables ---------- #}
+{%- set toolcall_begin_token = '<minimax:tool_call>' -%}
+{%- set toolcall_end_token = '</minimax:tool_call>' -%}
+{#- Tool Rendering Functions ============================================== -#}
+{%- macro render_tool_namespace(namespace_name, tool_list) -%}
+{%- for tool in tool_list -%}
+<tool>{{ tool.function | tojson(ensure_ascii=False) }}</tool>
+{% endfor -%}
+{%- endmacro -%}
+{%- macro visible_text(content) -%}
+{%- if content is string -%}
+{{ content }}
+{%- elif content is iterable and content is not mapping -%}
+{%- for item in content -%}
+{%- if item is mapping and item.type == 'text' -%}
+{{- item.text }}
+{%- elif item is string -%}
+{{- item }}
+{%- endif -%}
+{%- endfor -%}
+{%- else -%}
+{{- content }}
+{%- endif -%}
+{%- endmacro -%}
+{#- System Message Construction ============================================ -#}
+{%- macro build_system_message(system_message) -%}
+{%- if system_message and system_message.content -%}
+{{- visible_text(system_message.content) }}
+{%- else -%}
+{%- if model_identity is not defined -%}
+{%- set model_identity = "You are a helpful assistant." -%}
+{%- endif -%}
+{{- model_identity }}
+{%- endif -%}
+
+{#- Handle current_date -#}
+{%- if system_message and system_message.current_date -%}
+{{- '\n' ~ 'Current date: ' + system_message.current_date }}
+{%- endif -%}
+{#- Handle current_location -#}
+{%- if system_message and system_message.current_location -%}
+{{- '\n' ~ 'Current location: ' + system_message.current_location }}
+{%- endif -%}
+{%- endmacro -%}
+{#- Main Template Logic ================================================= -#}
+{#- Extract system message (only first message if it's system) -#}
+{%- set system_message = none -%}
+{%- set conversation_messages = messages -%}
+{%- if messages and messages[0].role == "system" -%}
+{%- set system_message = messages[0] -%}
+{%- set conversation_messages = messages[1:] -%}
+{%- endif -%}
+{#- Get the last user message turn, for interleaved thinking -#}
+{%- set ns = namespace(last_user_index=-1) %}
+{% for m in conversation_messages %}
+{%- if m.role == 'user' %}
+{% set ns.last_user_index = loop.index0 -%}
+{%- endif %}
+{%- endfor %}
+{#- Render system message -#}
+{{- ']~!b[' ~ ']~b]system' ~ '\n' }}
+{{- build_system_message(system_message) }}
+{#- Render tools if available -#}
+{%- if tools -%}
+{{- '\n\n' ~ '# Tools' ~ '\n' ~ 'You may call one or more tools to assist with the user query.\nHere are the tools available in JSONSchema format:' ~ '\n' }}
+{{- '\n' ~ '<tools>' ~ '\n' }}
+{{- render_tool_namespace("functions", tools) }}
+{{- '</tools>' ~ '\n\n' }}
+{{- 'When making tool calls, use XML format to invoke tools and pass parameters:' ~ '\n' }}
+{{- '\n' ~ toolcall_begin_token }}
+<invoke name="tool-name-1">
+<parameter name="param-key-1">param-value-1</parameter>
+<parameter name="param-key-2">param-value-2</parameter>
+...
+</invoke>
+{{- '\n' ~ toolcall_end_token }}
+{%- endif -%}
+{{- '[e~[\n' }}
+
+{#- Render messages -#}
+{%- set last_tool_call = namespace(name=none) -%}
+{%- for message in conversation_messages -%}
+{%- if message.role == 'assistant' -%}
+{#- Only render reasoning_content if no user message follows -#}
+{{- ']~b]ai' ~ '\n' }}
+
+{%- set reasoning_content = '' %}
+{%- set content = visible_text(message.content) %}
+{%- if message.reasoning_content is string %}
+{%- set reasoning_content = message.reasoning_content %}
+{%- else %}
+{%- if '</think>' in content %}
+{%- set reasoning_content = content.split('</think>')[0].strip('\n').split('<think>')[-1].strip('\n') %}
+{%- set content = content.split('</think>')[-1].strip('\n') %}
+{%- endif %}
+{%- endif %}
+{%- if reasoning_content and loop.index0 > ns.last_user_index -%}
+{{- '<think>' ~ '\n' ~ reasoning_content ~ '\n' ~ '</think>' ~ '\n\n' }}
+{%- endif -%}
+{%- if content -%}
+{{- content }}
+{%- endif -%}
+{%- if message.tool_calls -%}
+{{- '\n' ~ toolcall_begin_token ~ '\n' }}
+
+{%- for tool_call in message.tool_calls -%}
+{%- if tool_call.function %}
+{%- set tool_call = tool_call.function %}
+{%- endif %}
+{{- '<invoke name="' + tool_call.name + '">' }}
+{% set _args = tool_call.arguments %}
+{%- for k, v in _args.items() %}
+{{- '<parameter name="' + k + '">' }}
+{{- v | tojson(ensure_ascii=False) if v is not string else v }}
+{{- '</parameter>' }}
+{% endfor %}
+{{- '</invoke>' ~ '\n' }}
+{%- endfor -%}
+
+{{- toolcall_end_token}}
+{%- set last_tool_call.name = message.tool_calls[-1].name -%}
+{%- else -%}
+{%- set last_tool_call.name = none -%}
+{%- endif -%}
+{{- '[e~[' ~ '\n' }}
+
+{%- elif message.role == 'tool' -%}
+{%- if last_tool_call.name is none -%}
+{{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
+{%- endif -%}
+{%- if loop.first or (conversation_messages[loop.index0 - 1].role != 'tool') -%}
+{{- ']~b]tool' }}
+{%- endif -%}
+{%- if message.content is string -%}
+{{- '\n<response>' }}
+{{- message.content }}
+{{- '</response>' }}
+{%- else -%}
+{%- for tr in message.content -%}
+{{- '\n<response>' }}
+{{- tr.output if tr.output is defined else (tr.text if tr.type == 'text' and tr.text is defined else tr) }}
+{{- '\n</response>' }}
+{%- endfor -%}
+{%- endif -%}
+{%- if loop.last or (conversation_messages[loop.index0 + 1].role != 'tool') -%}
+{{- '[e~[\n' -}}
+{%- endif -%}
+
+{%- elif message.role == 'user' -%}
+{{- ']~b]user' ~ '\n' }}
+{{- visible_text(message.content) }}
+{{- '[e~[' ~ '\n' }}
+{%- endif -%}
+{%- endfor -%}
+
+{#- Generation prompt -#}
+{%- if add_generation_prompt -%}
+{{- ']~b]ai' ~ '\n' ~ '<think>' ~ '\n' }}
+{%- endif -%}
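
The template wraps every turn in ]~!b[ / ]~b]<role> ... [e~[ markers, injects tool schemas between <tools> tags in the system turn, keeps <think> reasoning only for assistant turns after the last user message, and ends with ']~b]ai\n<think>\n' when add_generation_prompt is set. A hedged usage sketch, assuming the LFS tokenizer files have been pulled into a local checkout; the get_weather tool is purely hypothetical:

from transformers import AutoTokenizer

# "." stands for a local checkout of this repository with LFS files fetched.
tokenizer = AutoTokenizer.from_pretrained(".", trust_remote_code=True)

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical tool, not part of this repo
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What's the weather in Chennai?"},
]

prompt = tokenizer.apply_chat_template(
    messages, tools=tools, add_generation_prompt=True, tokenize=False
)
print(prompt)  # the rendered string ends with ']~b]ai\n<think>\n'
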
config.json
ADDED
@@ -0,0 +1,120 @@
+{
+  "architectures": [
+    "MiniMaxM2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "attn_type_list": [
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1
+  ],
+  "attn_window_size": null,
+  "auto_map": {
+    "AutoConfig": "configuration_minimax_m2.MiniMaxM2Config",
+    "AutoModelForCausalLM": "modeling_minimax_m2.MiniMaxM2ForCausalLM"
+  },
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 3072,
+  "initializer_range": 0.02,
+  "intermediate_size": 1536,
+  "layernorm_full_attention_beta": 1.0,
+  "layernorm_linear_attention_beta": 1.0,
+  "layernorm_mlp_beta": 1.0,
+  "max_model_len": null,
+  "max_position_embeddings": 196608,
+  "mlp_intermediate_size": 8192,
+  "model_type": "minimax",
+  "mtp_transformer_layers": 1,
+  "num_attention_heads": 48,
+  "num_expert_group": null,
+  "num_experts_per_tok": 8,
+  "num_hidden_layers": 62,
+  "num_key_value_heads": 8,
+  "num_local_experts": 116,
+  "num_mtp_modules": 3,
+  "output_router_logits": false,
+  "partial_rotary_factor": 0.5,
+  "qk_norm_type": "per_layer",
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 5000000,
+  "rotary_dim": 64,
+  "routed_scaling_factor": 1.0,
+  "router_aux_loss_coef": 0.001,
+  "router_jitter_noise": 0.0,
+  "scoring_func": "sigmoid",
+  "shared_intermediate_size": 0,
+  "shared_moe_mode": "sigmoid",
+  "sliding_window": null,
+  "swa_rope_theta": -1.0,
+  "tie_word_embeddings": false,
+  "topk_group": null,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.55.0",
+  "use_cache": false,
+  "use_grouped_topk": true,
+  "use_mtp": true,
+  "use_qk_norm": true,
+  "use_routing_bias": true,
+  "vocab_size": 200064
+}
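
Because config.json carries an auto_map, loading it through the transformers Auto classes needs trust_remote_code=True so that configuration_minimax_m2.py from this repository is imported. A hedged sketch against a local checkout (note that the auto_map also references modeling_minimax_m2.py, which is not part of this commit):

from transformers import AutoConfig

config = AutoConfig.from_pretrained(".", trust_remote_code=True)
print(type(config).__name__)       # expected: MiniMaxM2Config (via auto_map)
print(config.num_hidden_layers)    # 62
print(config.num_local_experts,    # 116 routed experts,
      config.num_experts_per_tok)  # 8 active per token
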
configuration_minimax_m2.py
ADDED
@@ -0,0 +1,131 @@
+# SPDX-FileCopyrightText: 2024-2025 ModelCloud.ai
+# SPDX-FileCopyrightText: 2024-2025 [email protected]
+# SPDX-License-Identifier: Apache-2.0
+# Contact: [email protected], x.com/qubitium
+
+"""Configuration for the MiniMax M2 architecture."""
+
+from __future__ import annotations
+
+from typing import List, Optional, Union
+
+from transformers.configuration_utils import PretrainedConfig
+
+
+class MiniMaxM2Config(PretrainedConfig):
+    model_type = "minimax"
+
+    def __init__(
+        self,
+        vocab_size: int = 200_064,
+        hidden_size: int = 3_072,
+        intermediate_size: int = 1_536,
+        mlp_intermediate_size: int = 8_192,
+        num_hidden_layers: int = 62,
+        num_attention_heads: int = 48,
+        num_key_value_heads: int = 8,
+        head_dim: Optional[int] = 128,
+        num_local_experts: int = 256,
+        num_experts_per_tok: int = 8,
+        attn_type_list: Optional[List[int]] = None,
+        attention_dropout: float = 0.0,
+        hidden_act: str = "silu",
+        rms_norm_eps: float = 1e-6,
+        max_position_embeddings: int = 196_608,
+        rope_theta: float = 5_000_000.0,
+        rotary_dim: int = 64,
+        rope_scaling: Optional[dict] = None,
+        use_qk_norm: bool = True,
+        qk_norm_type: str = "per_layer",
+        use_routing_bias: bool = True,
+        scoring_func: str = "sigmoid",
+        router_aux_loss_coef: float = 0.001,
+        router_jitter_noise: float = 0.0,
+        output_router_logits: bool = False,
+        use_grouped_topk: bool = True,
+        num_expert_group: Optional[int] = None,
+        topk_group: Optional[int] = None,
+        routed_scaling_factor: float = 1.0,
+        layernorm_full_attention_beta: float = 1.0,
+        layernorm_linear_attention_beta: float = 1.0,
+        layernorm_mlp_beta: float = 1.0,
+        shared_intermediate_size: int = 0,
+        shared_moe_mode: str = "sigmoid",
+        use_mtp: bool = True,
+        num_mtp_modules: int = 3,
+        mtp_transformer_layers: int = 1,
+        attn_window_size: Optional[Union[int, List[int]]] = None,
+        swa_rope_theta: float = -1.0,
+        sliding_window: Optional[int] = None,
+        initializer_range: float = 0.02,
+        tie_word_embeddings: bool = False,
+        max_model_len: Optional[int] = None,
+        bos_token_id: Optional[int] = None,
+        eos_token_id: Optional[int] = None,
+        pad_token_id: Optional[int] = None,
+        use_cache: bool = True,
+        **kwargs,
+    ) -> None:
+        quantization_config = kwargs.pop("quantization_config", None)
+        transformers_version = kwargs.pop("transformers_version", None)
+
+        super().__init__(
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            pad_token_id=pad_token_id,
+            **kwargs,
+        )
+
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.mlp_intermediate_size = mlp_intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.num_key_value_heads = num_key_value_heads
+        self.head_dim = head_dim or hidden_size // num_attention_heads
+        self.num_local_experts = num_local_experts
+        self.num_experts_per_tok = num_experts_per_tok
+        self.attn_type_list = attn_type_list or [1] * num_hidden_layers
+        self.attention_dropout = attention_dropout
+        self.hidden_act = hidden_act
+        self.rms_norm_eps = rms_norm_eps
+        self.max_position_embeddings = max_position_embeddings
+        self.rope_theta = rope_theta
+        self.rotary_dim = rotary_dim
+        self.rope_scaling = rope_scaling
+        self.use_qk_norm = use_qk_norm
+        self.qk_norm_type = qk_norm_type
+        self.use_routing_bias = use_routing_bias
+        self.scoring_func = scoring_func
+        self.router_aux_loss_coef = router_aux_loss_coef
+        self.router_jitter_noise = router_jitter_noise
+        self.output_router_logits = output_router_logits
+        self.use_grouped_topk = use_grouped_topk
+        self.num_expert_group = num_expert_group
+        self.topk_group = topk_group
+        self.routed_scaling_factor = routed_scaling_factor
+        self.layernorm_full_attention_beta = layernorm_full_attention_beta
+        self.layernorm_linear_attention_beta = layernorm_linear_attention_beta
+        self.layernorm_mlp_beta = layernorm_mlp_beta
+        self.shared_intermediate_size = shared_intermediate_size
+        self.shared_moe_mode = shared_moe_mode
+        self.use_mtp = use_mtp
+        self.num_mtp_modules = num_mtp_modules
+        self.mtp_transformer_layers = mtp_transformer_layers
+        self.attn_window_size = attn_window_size
+        self.swa_rope_theta = swa_rope_theta
+        self.sliding_window = sliding_window
+        self.initializer_range = initializer_range
+        self.max_model_len = max_model_len
+        self.use_cache = use_cache
+
+        # Convenient accessor used by rotary embedding helper
+        self.partial_rotary_factor = float(self.rotary_dim) / float(self.head_dim)
+        if quantization_config is not None:
+            self.quantization_config = quantization_config
+        self.transformers_version = transformers_version
+
+
+__all__ = ["MiniMaxM2Config"]
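
The class defaults largely mirror config.json (one visible difference: num_local_experts defaults to 256 here but is set to 116 in config.json), and partial_rotary_factor is derived as rotary_dim / head_dim. An illustrative check, assuming the file is importable from the current directory:

from configuration_minimax_m2 import MiniMaxM2Config

cfg = MiniMaxM2Config(num_local_experts=116)  # match the value in config.json
print(cfg.partial_rotary_factor)  # 64 / 128 = 0.5, as stored in config.json
print(len(cfg.attn_type_list))    # defaults to [1] * num_hidden_layers = 62
print(cfg.head_dim)               # 128
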
merges.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37c54e1d6eb451b1db3ab89669382c097e6533a47fe526e90dd70b35a0d28bc8
+size 2414078
model-00001-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87441b8743917e14c7569367437b353d3f9ebdd2ee94fec529cf3d9c79a5d519
+size 10733280832
model-00002-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67b3808a2c99582351a846156b5c0df519a6ee631b049603a0505b591de8a222
+size 10732445712
model-00003-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17c9b358049a858390bbe22d85fbc7ca6b537297a954fec195e2c106f781415c
+size 10732445712
model-00004-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d87fbccf362f6c7a0c670943ba39f2507a23daa2b09768781bb9c93ed59e0763
+size 10736330640
model-00005-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dde7f15e53ec8e1dad1c61f3595b3c429dc6639e4344d7f94bc6c365360004bd
+size 10732445408
model-00006-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9411de3c15f225dbef2aa729280e8f22b15939e90d774f6ea340b6af5f39e030
+size 10732445712
model-00007-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ddff75d84d902dfa3e678f9779666beb0c77de8d408083d0e9c9ea5187f86b7
+size 10732445712
model-00008-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bee5074e2106442039af56f5a37f0c0cfcded7216a0a386ab9d0cd95444c526
+size 10732445360
model-00009-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b588cfab4237c9e80c62ab9f06bd296fb2a889426359449b60005e312d9fd60
+size 10732445712
model-00010-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b47892273d418f87e24ab2e19eb68ba85d9a84f3f71939895410b504b940e2a4
+size 10736330656
model-00011-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:474938c0f72c5eaf2209ff655e20020f1f7f27644b10c849f0dde83a0feb9c87
+size 10732445640
model-00012-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ced02c8ffd78c65934fa32f1424aaed183de7b6be53a31efa074747a8c0d1a00
+size 10732445456
model-00013-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5574a46069528ba54172409b8538ed604b59f9481a558f710b7890d13d571f36
+size 10732445712
model-00014-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ea6d45792e7927b04086daf63da9c34347109d84c32642ae0f07251e02998ff
+size 10732445712
model-00015-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69dfaccd03265c4829aa17f78dd72246f3f3ec7013bb4a9da1a12965e4c62ee2
+size 10736330280
model-00016-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39bd7f3ebd0c8c8ba10f326d14751d1b5c720462fbaadc69135cfc4fd42415a3
+size 10732445760
model-00017-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0265c857910062cd049797ad533f0c1fc520684f7ee2959001c787a78071dec2
+size 10732445712
model-00018-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29cdfcbfe5bbdff90f256e9e6076c0c213d1d953f91d165004f37ca335bfcff0
+size 10732445520
model-00019-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d44a0507aa900c56ca68f29daaaab71c528b91520d7fd1882ab1d871cb7d52c
+size 10732445288
model-00020-of-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad1694a164946d9e16685a0a1bcaa81e207e679ed52f7cbbc24c98115a27e2b1
+size 7655885952
model.safetensors.index.json
ADDED
(The diff for this file is too large to render.)
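
The index file maps each tensor name to one of the twenty shards above, so from_pretrained reassembles the weights transparently once the LFS files are pulled. A hedged loading sketch (trust_remote_code is required for the custom architecture, the auto_map points at modeling_minimax_m2.py which is not included in this commit, and enough memory for the 200+ GB of bfloat16 shards is assumed):

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    ".",                        # local checkout with all LFS shards pulled
    torch_dtype=torch.bfloat16,
    device_map="auto",          # requires accelerate; spreads shards across devices
    trust_remote_code=True,
)
print(sum(p.numel() for p in model.parameters()))  # total parameter count
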
special_tokens_map.json
ADDED
@@ -0,0 +1,75 @@
+{
+  "additional_special_tokens": [
+    "<code_interpreter>",
+    "<commit_after>",
+    "<commit_before>",
+    "<commit_msg>",
+    "<empty_output>",
+    "<filename>",
+    "<fim_middle>",
+    "<fim_pad>",
+    "<fim_prefix>",
+    "<fim_suffix>",
+    "<function_call>",
+    "<gh_stars>",
+    "]<]speech[>[",
+    "]<]image[>[",
+    "]<]video[>[",
+    "]<]start of speech[>[",
+    "]<]end of speech[>[",
+    "]<]start of image[>[",
+    "]<]end of image[>[",
+    "]<]start of video[>[",
+    "]<]end of video[>[",
+    "]<]vision pad[>[",
+    "]~!b[",
+    "<issue_closed>",
+    "<issue_comment>",
+    "<issue_start>",
+    "<jupyter_code>",
+    "<jupyter_output>",
+    "<jupyter_start>",
+    "<jupyter_text>",
+    "<reponame>",
+    "[e~[",
+    "]!d~[",
+    "]!p~[",
+    "]~b]",
+    "<jupyter_error>",
+    "<add_file>",
+    "<delete_file>",
+    "<rename_file>",
+    "<edit_file>",
+    "<commit_message>",
+    "<empty_source_file>",
+    "<repo_struct>",
+    "<code_context>",
+    "<file_content>",
+    "<source_files>",
+    "<pr_start>",
+    "<review_comment>",
+    "<filepath>",
+    "<file_sep>"
+  ],
+  "bos_token": {
+    "content": "]~!b[",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "[e~[",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "]!d~[",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
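
The bos/eos/unk strings declared here are the same markers the chat template emits around each turn, and they resolve to IDs defined in added_tokens.json. An illustrative cross-check, run from the repository root:

import json
from pathlib import Path

special = json.loads(Path("special_tokens_map.json").read_text(encoding="utf-8"))
added = json.loads(Path("added_tokens.json").read_text(encoding="utf-8"))

for name in ("bos_token", "eos_token", "unk_token"):
    token = special[name]["content"]
    print(name, repr(token), added[token])
# bos_token ']~!b[' 200034, eos_token '[e~[' 200020, unk_token ']!d~[' 200021
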
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7b90ed7f55d905175bc26771d6d7d33b40b46742f073675bc816fedaf482ea1
+size 15522763
tokenizer_config.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fb1c28a83798e52fb41550d95cc6e93eb9db16fdc608c73c56172a72bb73581
+size 11091
vocab.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:449838e19b2c13a375ee324056b4fe0f19128ea359283941d27c4e603292dab5
+size 3905412