awni committed (verified)
Commit 4386bf6 · 1 Parent(s): d4797ad

Add files using upload-large-folder tool
Files changed (4):
  1. README.md             +1 -1
  2. config.json           +6 -4
  3. model.safetensors     +1 -1
  4. tokenizer_config.json +2 -1
README.md CHANGED
@@ -15,7 +15,7 @@ pipeline_tag: text-generation
 
 This model [mlx-community/Falcon-H1-0.5B-Instruct-6bit](https://huggingface.co/mlx-community/Falcon-H1-0.5B-Instruct-6bit) was
 converted to MLX format from [tiiuae/Falcon-H1-0.5B-Instruct](https://huggingface.co/tiiuae/Falcon-H1-0.5B-Instruct)
-using mlx-lm version **0.25.2**.
+using mlx-lm version **0.28.0**.
 
 ## Use with mlx
 
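The unchanged `## Use with mlx` heading in the hunk context is the README's usage section. For reference, loading this 6-bit model follows the standard mlx-lm pattern — a minimal sketch assuming the usual `mlx_lm` Python API, not this repo's exact README text:

```python
# Minimal sketch of the README's "Use with mlx" flow (standard mlx-lm API).
from mlx_lm import load, generate

model, tokenizer = load("mlx-community/Falcon-H1-0.5B-Instruct-6bit")

prompt = "What does 6-bit quantization trade off?"

# Apply the chat template (added to tokenizer_config.json in this commit).
if tokenizer.chat_template is not None:
    messages = [{"role": "user", "content": prompt}]
    prompt = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True
    )

response = generate(model, tokenizer, prompt=prompt, verbose=True)
```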
config.json CHANGED
@@ -9,7 +9,7 @@
   "attn_layer_indices": null,
   "bos_token_id": 1,
   "embedding_multiplier": 5.656854249492381,
-  "eos_token_id": 228,
+  "eos_token_id": 11,
   "head_dim": 64,
   "hidden_act": "silu",
   "hidden_size": 1024,
@@ -30,7 +30,7 @@
   "mamba_proj_bias": false,
   "mamba_rms_norm": false,
   "mamba_use_mlp": true,
-  "max_position_embeddings": 131072,
+  "max_position_embeddings": 16384,
   "mlp_bias": false,
   "mlp_expansion_factor": 8,
   "mlp_multipliers": [
@@ -46,11 +46,13 @@
   "projectors_bias": false,
   "quantization": {
     "group_size": 64,
-    "bits": 6
+    "bits": 6,
+    "mode": "affine"
   },
   "quantization_config": {
     "group_size": 64,
-    "bits": 6
+    "bits": 6,
+    "mode": "affine"
   },
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
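Beyond the metadata updates (`eos_token_id`, `max_position_embeddings`), the diff adds an explicit `"mode": "affine"` to both quantization blocks. In group-wise affine quantization, each group of `group_size` (here 64) weights is stored as `bits`-wide (here 6-bit) integers plus a per-group scale and offset. A minimal sketch of that arithmetic — illustrative only, not mlx-lm's packed-kernel implementation:

```python
import numpy as np

def affine_quantize(w, group_size=64, bits=6):
    """Group-wise affine quantization: w ≈ scale * q + offset per group.

    Illustrative sketch of the scheme named by "mode": "affine" in
    config.json; mlx-lm's real implementation uses packed MLX kernels.
    """
    levels = 2**bits - 1                       # 6 bits -> q in [0, 63]
    groups = w.reshape(-1, group_size)
    w_min = groups.min(axis=1, keepdims=True)  # per-group offset
    w_max = groups.max(axis=1, keepdims=True)
    scale = (w_max - w_min) / levels           # per-group scale
    q = np.round((groups - w_min) / scale)
    q = np.clip(q, 0, levels).astype(np.uint8)
    return q, scale, w_min

def affine_dequantize(q, scale, offset):
    return q * scale + offset

w = np.random.randn(4096).astype(np.float32)
q, scale, offset = affine_quantize(w)
w_hat = affine_dequantize(q, scale, offset).reshape(w.shape)
print("max reconstruction error:", np.abs(w - w_hat).max())
```

Affine appears to have been the implicit default scheme; presumably the newer mlx-lm (0.28.0 per the README bump) now records the mode explicitly.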
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:92285ed0654a962696b6bac8b810dc53aa160d6252830d4f3700d7a4f2245a12
+oid sha256:2c91f131b70d759a4470c3ceb119e1d51edeb0240c8720c84caa9dbfbc942aae
 size 424258964
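Only the Git LFS pointer changes: the re-converted weights get a new content hash while the byte size stays identical at 424258964. An LFS pointer's `oid` is simply the SHA-256 of the stored file, so a download can be checked against this diff — a small sketch (the local path is hypothetical):

```python
import hashlib

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """SHA-256 of the file contents, i.e. what an LFS pointer's oid records."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

# "model.safetensors" here is a hypothetical local download path.
assert lfs_oid("model.safetensors") == (
    "2c91f131b70d759a4470c3ceb119e1d51edeb0240c8720c84caa9dbfbc942aae"
)
```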
tokenizer_config.json CHANGED
@@ -4535,6 +4535,7 @@
     ">>UNUSED_511<<"
   ],
   "bos_token": "<|begin_of_text|>",
+  "chat_template": "{{bos_token}}\n{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"You are a function calling AI model. You are provided with function signature within <tools> </tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions.\\n<tools>\\n\" }}\n {%- for tool in tools %}[{{- tool | tojson }}]{%- endfor %}\n {{- \"\\n</tools>\\nFor each function call, return a json object with function name and arguments within <tool_call> </tool_call> tags with the following schema:\\n<tool_call>\\n{'arguments': <args-dict>, 'name': <function-name>}\\n</tool_call>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}{% for message in messages %}{%- if message.role != 'system' %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{%- endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|end_of_text|>",
   "extra_special_tokens": {},
@@ -4544,5 +4545,5 @@
   ],
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<pad>",
-  "tokenizer_class": "PreTrainedTokenizerFast"
+  "tokenizer_class": "PreTrainedTokenizer"
 }
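The new `chat_template` is a ChatML-style Jinja template (`<|im_start|>…<|im_end|>`) with an optional tool-calling system prompt. Rendering it goes through the standard `apply_chat_template` API — a minimal sketch:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "mlx-community/Falcon-H1-0.5B-Instruct-6bit"
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Give me one fact about falcons."},
]

# Renders the ChatML-style template added in this commit, ending with an
# open "<|im_start|>assistant" turn for the model to complete.
text = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(text)
```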