config.json (3,730 bytes) for a compressed-tensors W4A16 quantization of Qwen/Qwen1.5-MoE-A2.7B-Chat (4-bit symmetric int weights, group size 128):
{
  "_name_or_path": "Qwen/Qwen1.5-MoE-A2.7B-Chat",
  "architectures": [
    "Qwen2MoeForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "decoder_sparse_step": 1,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 5632,
  "max_position_embeddings": 32768,
  "max_window_layers": 21,
  "mlp_only_layers": [],
  "model_type": "qwen2_moe",
  "moe_intermediate_size": 1408,
  "norm_topk_prob": false,
  "num_attention_heads": 16,
  "num_experts": 60,
  "num_experts_per_tok": 4,
  "num_hidden_layers": 24,
  "num_key_value_heads": 16,
  "output_router_logits": false,
  "quantization_config": {
    "config_groups": {
      "group_0": {
        "input_activations": null,
        "output_activations": null,
        "targets": [
          "Linear"
        ],
        "weights": {
          "actorder": null,
          "block_structure": null,
          "dynamic": false,
          "group_size": 128,
          "num_bits": 4,
          "observer": "minmax",
          "observer_kwargs": {},
          "strategy": "group",
          "symmetric": true,
          "type": "int"
        }
      }
    },
    "format": "pack-quantized",
    "global_compression_ratio": 2.2386425028132897,
    "ignore": [
      "model.layers.0.mlp.gate",
      "model.layers.0.mlp.shared_expert_gate",
      "model.layers.1.mlp.gate",
      "model.layers.1.mlp.shared_expert_gate",
      "model.layers.2.mlp.gate",
      "model.layers.2.mlp.shared_expert_gate",
      "model.layers.3.mlp.gate",
      "model.layers.3.mlp.shared_expert_gate",
      "model.layers.4.mlp.gate",
      "model.layers.4.mlp.shared_expert_gate",
      "model.layers.5.mlp.gate",
      "model.layers.5.mlp.shared_expert_gate",
      "model.layers.6.mlp.gate",
      "model.layers.6.mlp.shared_expert_gate",
      "model.layers.7.mlp.gate",
      "model.layers.7.mlp.shared_expert_gate",
      "model.layers.8.mlp.gate",
      "model.layers.8.mlp.shared_expert_gate",
      "model.layers.9.mlp.gate",
      "model.layers.9.mlp.shared_expert_gate",
      "model.layers.10.mlp.gate",
      "model.layers.10.mlp.shared_expert_gate",
      "model.layers.11.mlp.gate",
      "model.layers.11.mlp.shared_expert_gate",
      "model.layers.12.mlp.gate",
      "model.layers.12.mlp.shared_expert_gate",
      "model.layers.13.mlp.gate",
      "model.layers.13.mlp.shared_expert_gate",
      "model.layers.14.mlp.gate",
      "model.layers.14.mlp.shared_expert_gate",
      "model.layers.15.mlp.gate",
      "model.layers.15.mlp.shared_expert_gate",
      "model.layers.16.mlp.gate",
      "model.layers.16.mlp.shared_expert_gate",
      "model.layers.17.mlp.gate",
      "model.layers.17.mlp.shared_expert_gate",
      "model.layers.18.mlp.gate",
      "model.layers.18.mlp.shared_expert_gate",
      "model.layers.19.mlp.gate",
      "model.layers.19.mlp.shared_expert_gate",
      "model.layers.20.mlp.gate",
      "model.layers.20.mlp.shared_expert_gate",
      "model.layers.21.mlp.gate",
      "model.layers.21.mlp.shared_expert_gate",
      "model.layers.22.mlp.gate",
      "model.layers.22.mlp.shared_expert_gate",
      "model.layers.23.mlp.gate",
      "model.layers.23.mlp.shared_expert_gate",
      "lm_head"
    ],
    "kv_cache_scheme": null,
    "quant_method": "compressed-tensors",
    "quantization_status": "compressed"
  },
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "router_aux_loss_coef": 0.001,
  "shared_expert_intermediate_size": 5632,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.3",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
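
As a quick sanity check, the file can be loaded and inspected with Hugging Face transformers. A minimal sketch, assuming the config.json above is saved in a local directory; "path/to/model_dir" is a placeholder, since the quantized repo id is not shown on this page:

```python
from transformers import AutoConfig

# Placeholder path: a local directory containing the config.json above.
config = AutoConfig.from_pretrained("path/to/model_dir")

# Architecture facts read straight from the file.
print(config.model_type)            # qwen2_moe
print(config.num_experts)           # 60 routed experts per MoE layer
print(config.num_experts_per_tok)   # 4 experts activated per token
print(config.num_hidden_layers)     # 24

# Quantization block: weights-only 4-bit int, symmetric, group size 128.
# input_activations/output_activations are null, i.e. activations stay in
# bfloat16, giving a W4A16 scheme.
q = config.quantization_config      # kept as a plain dict on the config
weights = q["config_groups"]["group_0"]["weights"]
print(q["quant_method"])                            # compressed-tensors
print(weights["num_bits"], weights["group_size"])   # 4 128
```

Note also the "ignore" list: the per-layer MoE router gates, the shared-expert gates, and lm_head are left unquantized, so only the Linear layers outside that list are packed to 4 bits.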