models:
  - model: Kaoeiri/Qwenwify-32B-v3 # backbone model, used for fusion
    parameters:
      weight: 1.0
      density: 0.92
  - model: EVA-UNIT-01/EVA-Qwen2.5-32B-v0.2
    parameters:
      weight: 0.5
      density: 0.425
  - model: Sao10K/32B-Qwen2.5-Kunou-v1 # RP and synthetic storywriting
    parameters:
      weight: 0.30
      density: 0.75
  - model: Dans-DiscountModels/Qwen2.5-32B-ChatML # logic and chatting focus
    parameters:
      weight: 0.15
      density: 0.85
  - model: OpenBuddy/openbuddy-qwq-32b-v24.2-200k # Chinese-heavy datasets, raw, diverse
    parameters:
      weight: 0.25
      density: 0.88
  - model: Saxo/Linkbricks-Horizon-AI-Japanese-Base-32B # Japanese-focused, base model
    parameters:
      weight: 0.20
      density: 0.82
  - model: allura-org/Qwen2.5-32b-RP-Ink # RP-focused, unique character traits
    parameters:
      weight: 0.28
      density: 0.78
  - model: AiCloser/Qwen2.5-32B-AGI
    parameters:
      weight: 0.15
      density: 0.68
  - model: huihui-ai/QwQ-32B-Preview-abliterated
    parameters:
      weight: 0.14
      density: 0.68
  - model: huihui-ai/Qwen2.5-32B-Instruct-abliterated
    parameters:
      weight: 0.23
      density: 0.78
merge_method: dare_ties
base_model: Qwen/QwQ-32B-Preview
parameters:
  density: 0.90
  epsilon: 0.05
  lambda: 1.35
random_seed: 42
dtype: bfloat16
tokenizer_source: union