Dogacel committed
Commit 194a2e0 · verified · 1 Parent(s): 3328039

Update README.md

Files changed (1)
  1. README.md +10 -10
README.md CHANGED
@@ -10,7 +10,8 @@ tags:
 - code
 - qwen
 - text-generation-inference
-library_name: peft
+- transformers
+library_name: transformers
 ---
 
 # Qwen3-Coder-30B-A3B-Kubernetes-Instruct
@@ -72,26 +73,25 @@ Use the code below to get started with the model.
 ```python
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
-from peft import PeftModel
 
-base_model_id = "Qwen/Qwen3-Coder-30B-A3B-Instruct"
-adapter_id = "Dogacel/Qwen3-Coder-30B-A3B-Kubernetes-Instruct"
+# Path to your merged model (no base model needed)
+model_id = "Dogacel/Qwen3-Coder-30B-A3B-Kubernetes-Instruct"
 
-# Load base model with device_map to avoid RAM OOM
+# 1. Load the Full Model
+# Use device_map="auto" to handle the 30B size efficiently
 model = AutoModelForCausalLM.from_pretrained(
-    base_model_id,
+    model_id,
     torch_dtype=torch.float16,
     device_map="auto",
     low_cpu_mem_usage=True
 )
 
-# Load the adapter
-model = PeftModel.from_pretrained(model, adapter_id)
-tokenizer = AutoTokenizer.from_pretrained(base_model_id)
+tokenizer = AutoTokenizer.from_pretrained(model_id)
 
+# 2. Run Inference
 messages = [
     {"role": "system", "content": "You are a Kubernetes expert. Diagnose issues step-by-step, then provide the fixed YAML configuration."},
-    {"role": "user", "content": "When I run kubectl apply, I get the following error ..."}
+    {"role": "user", "content": "My Pod is in Pending state and describing it says 'Insufficient cpu'. How do I fix this?"}
 ]
 
 text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
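
Note: the hunk ends at the `apply_chat_template` call, so the rest of the README's example is not shown in this diff. A minimal sketch of how such a snippet typically continues, assuming only standard `transformers` generation APIs (the `max_new_tokens` and sampling values below are illustrative choices, not taken from the model card):

```python
# Illustrative continuation (not part of this commit): tokenize the templated
# prompt, generate, and decode only the newly generated tokens.
inputs = tokenizer(text, return_tensors="pt").to(model.device)

outputs = model.generate(
    **inputs,
    max_new_tokens=512,   # assumed token budget for the diagnosis plus fixed YAML
    do_sample=True,
    temperature=0.7,      # assumed sampling settings, adjust as needed
)

# Drop the prompt tokens so only the model's answer is decoded
answer = tokenizer.decode(
    outputs[0][inputs["input_ids"].shape[-1]:],
    skip_special_tokens=True,
)
print(answer)
```

With `device_map="auto"`, moving the inputs to `model.device` places them on the same device as the model's first shard, which is the usual pattern for a sharded 30B checkpoint.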