# training-scripts/test_qwen3_capybara.py
# /// script
# dependencies = [
# "trl>=0.12.0",
# "peft>=0.7.0",
# "transformers>=4.36.0",
# "accelerate>=0.24.0",
# "datasets",
# "torch",
# ]
# ///
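# Run with `uv run test_qwen3_capybara.py`: uv reads the inline metadata block
# above (PEP 723) and resolves the dependencies before executing the script.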
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
# Load known-working TRL dataset
print("Loading dataset...")
dataset = load_dataset("trl-lib/Capybara", split="train")
print(f"Dataset loaded: {len(dataset)} examples")
# Small subset for quick test
dataset = dataset.shuffle(seed=42).select(range(1000))
print(f"Using {len(dataset)} examples")
# Split
dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
train_dataset = dataset_split["train"]
eval_dataset = dataset_split["test"]
# Training configuration
config = SFTConfig(
    output_dir="qwen3-0.6b-test",
    push_to_hub=True,
    hub_model_id="luiscosio/qwen3-0.6b-test",
    num_train_epochs=1,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=4,  # effective batch size of 2 x 4 = 8 per device
    gradient_checkpointing=True,  # recompute activations to cut memory usage
    learning_rate=2e-4,
    logging_steps=10,
    save_strategy="steps",
    save_steps=50,
    eval_strategy="steps",
    eval_steps=50,
    warmup_ratio=0.1,
    bf16=True,
    max_length=1024,  # truncate sequences to 1024 tokens
    report_to="none",  # disable W&B/TensorBoard logging
)
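# With 900 training examples and an effective batch of 8, one epoch is
# ceil(900 / 8) = 113 optimizer steps, so eval/save each trigger about twice.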
# LoRA configuration
peft_config = LoraConfig(
    r=16,  # adapter rank
    lora_alpha=32,  # scaling factor (alpha / r = 2.0)
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    # Adapt only the attention projections; the MLP weights stay frozen
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
)
# Initialize and train
print("Initializing trainer...")
trainer = SFTTrainer(
    model="Qwen/Qwen3-0.6B",  # passing a model id string lets TRL load the model itself
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    args=config,
    peft_config=peft_config,
)
print("Starting training...")
trainer.train()
print("Pushing to Hub...")
trainer.push_to_hub()
print("Done!")