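# Smoke-test script: LoRA fine-tune Qwen2.5-0.5B on a 50-example slice of the
# Capybara dataset and push the resulting adapter to the Hugging Face Hub.
# The PEP 723 metadata block below declares the dependencies, so the script can
# be run directly with a PEP 723-aware runner (e.g. `uv run <this file>`).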
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "torch>=2.0.0",
#     "transformers>=4.45.0,<5.0.0",
#     "datasets>=2.18.0",
#     "accelerate>=0.30.0",
#     "peft>=0.13.0",
#     "trl>=0.12.0",
# ]
# ///
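
# Environment sanity checks: confirm Python, PyTorch, and Transformers versions
# and CUDA availability before starting the run.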
import sys
print(f"Python version: {sys.version}")
import torch
print(f"PyTorch version: {torch.__version__}")
print(f"CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"CUDA device: {torch.cuda.get_device_name(0)}")
import transformers
print(f"Transformers version: {transformers.__version__}")
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
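
# Load only the first 50 Capybara conversations to keep this test run fast.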
print("Loading dataset...")
ds = load_dataset("trl-lib/Capybara", split="train[:50]")
print(f"Loaded {len(ds)} examples")
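
# Training arguments: a single epoch at batch size 1, with the result pushed to
# the Hub. Note: bf16=True assumes hardware with bfloat16 support (e.g. an
# Ampere-or-newer GPU).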
config = SFTConfig(
    output_dir="test-output",
    num_train_epochs=1,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=1,
    logging_steps=5,
    report_to="none",
    push_to_hub=True,
    hub_model_id="luiscosio/qwen25-test-v2",
    bf16=True,
)
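
# LoRA adapter: rank-8 updates on the attention query/value projections only,
# which keeps the number of trainable parameters small.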
peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj"],
)
print("Creating trainer with Qwen2.5-0.5B...")
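
# SFTTrainer accepts a model id string and loads the base model itself; with
# peft_config set, it wraps the model in the LoRA adapter before training.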
trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B",
    train_dataset=ds,
    args=config,
    peft_config=peft_config,
)
print("Training...")
trainer.train()
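
# Upload the final adapter to hub_model_id (set in SFTConfig above).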
trainer.push_to_hub()
print("Done!")