# -*- coding: utf-8 -*-
"""Gemma3_(4B).ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma3_(4B).ipynb

To run this, press "*Runtime*" and press "*Run all*" on a **free** Tesla T4 Google Colab instance!
<div class="align-center">
<a href="https://unsloth.ai/"><img src="https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png" width="115"></a>
<a href="https://discord.gg/unsloth"><img src="https://github.com/unslothai/unsloth/raw/main/images/Discord button.png" width="145"></a>
<a href="https://docs.unsloth.ai/"><img src="https://github.com/unslothai/unsloth/blob/main/images/documentation%20green%20button.png?raw=true" width="125"></a> Join Discord if you need help + ⭐ <i>Star us on <a href="https://github.com/unslothai/unsloth">Github</a></i> ⭐
</div>

To install Unsloth on your own computer, follow the installation instructions on our Github page [here](https://docs.unsloth.ai/get-started/installing-+-updating).

You will learn how to do [data prep](#Data), how to [train](#Train), how to [run the model](#Inference), & [how to save it](#Save).

### News

**Read our [Gemma 3 blog](https://unsloth.ai/blog/gemma3) for what's new in Unsloth and our [Reasoning blog](https://unsloth.ai/blog/r1-reasoning) on how to train reasoning models.**

Visit our docs for all our [model uploads](https://docs.unsloth.ai/get-started/all-our-models) and [notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks).

### Installation
"""

# Commented out IPython magic to ensure Python compatibility.
# %%capture
# import os
# if "COLAB_" not in "".join(os.environ.keys()):
#     !pip install unsloth vllm
# else:
#     # [NOTE] Do the below ONLY in Colab! Use [[pip install unsloth vllm]]
#     !pip install --no-deps unsloth vllm
#     # Install latest Hugging Face for Gemma-3!
#     !pip install --no-deps git+https://github.com/huggingface/transformers@v4.49.0-Gemma-3

# Commented out IPython magic to ensure Python compatibility.
# #@title Colab Extra Install { display-mode: "form" }
# %%capture
# import os
# if "COLAB_" not in "".join(os.environ.keys()):
#     !pip install unsloth vllm
# else:
#     !pip install --no-deps unsloth vllm
#     # [NOTE] Do the below ONLY in Colab! Use [[pip install unsloth vllm]]
#     # Skip restarting message in Colab
#     import sys, re, requests; modules = list(sys.modules.keys())
#     for x in modules: sys.modules.pop(x) if "PIL" in x or "google" in x else None
#     !pip install --no-deps bitsandbytes accelerate xformers==0.0.29.post3 peft "trl==0.15.2" triton cut_cross_entropy unsloth_zoo
#     !pip install sentencepiece protobuf datasets huggingface_hub hf_transfer
#
#     # vLLM requirements - vLLM breaks Colab due to reinstalling numpy
#     f = requests.get("https://raw.githubusercontent.com/vllm-project/vllm/refs/heads/main/requirements/common.txt").content
#     with open("vllm_requirements.txt", "wb") as file:
#         file.write(re.sub(rb"(transformers|numpy|xformers)[^\n]{1,}\n", b"", f))
#     !pip install -r vllm_requirements.txt

"""### Unsloth

`FastModel` supports loading nearly any model now! This includes Vision and Text models!
"""

from unsloth import FastModel
import torch

fourbit_models = [
    # 4bit dynamic quants for superior accuracy and low memory use
    "unsloth/gemma-3-1b-it-unsloth-bnb-4bit",
    "unsloth/gemma-3-4b-it-unsloth-bnb-4bit",
    "unsloth/gemma-3-12b-it-unsloth-bnb-4bit",
    "unsloth/gemma-3-27b-it-unsloth-bnb-4bit",

    # Other popular models!
    "unsloth/Llama-3.1-8B",
    "unsloth/Llama-3.2-3B",
    "unsloth/Llama-3.3-70B",
    "unsloth/mistral-7b-instruct-v0.3",
    "unsloth/Phi-4",
] # More models at https://huggingface.co/unsloth

model, tokenizer = FastModel.from_pretrained(
    model_name = "NewEden/Gemma-Merged-V2",
    max_seq_length = 8192,   # Choose any for long context!
    load_in_4bit = False,    # 4 bit quantization to reduce memory
    load_in_8bit = False,    # [NEW!] A bit more accurate, uses 2x memory
    full_finetuning = False, # [NEW!] We have full finetuning now!
    # token = "hf_...", # use one if using gated models
)

"""We now add LoRA adapters so we only need to update a small number of parameters!"""

model = FastModel.get_peft_model(
    model,
    finetune_vision_layers     = False, # Turn off for just text!
    finetune_language_layers   = True,  # Should leave on!
    finetune_attention_modules = True,  # Attention good for GRPO
    finetune_mlp_modules       = True,  # Should leave on always!
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    r = 64,          # Larger = higher accuracy, but might overfit
    lora_alpha = 32, # alpha >= r is the usual recommendation; here alpha = r/2 scales the update down
    lora_dropout = 0.1,
    bias = "none",

    random_state = 3407,
)
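
"""As a quick sanity check (an addition to this notebook, plain PyTorch rather than Unsloth API), we can count how many parameters the LoRA adapters leave trainable. It only assumes the `model` object returned above."""

def count_trainable_parameters(m):
    # LoRA should leave only a small fraction of weights with requires_grad = True.
    trainable = sum(p.numel() for p in m.parameters() if p.requires_grad)
    total     = sum(p.numel() for p in m.parameters())
    print(f"Trainable: {trainable:,} / {total:,} ({100 * trainable / total:.2f}%)")

count_trainable_parameters(model)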

"""<a name="Data"></a>
### Data Prep
We now use the `Gemma-3` format for conversation-style finetunes. Here we load the `NewEden/Light-Novels-Roleplay-Logs-Books-Oh-My-duplicate-turns-removed` dataset in ShareGPT style (the stock notebook uses [Maxime Labonne's FineTome-100k](https://huggingface.co/datasets/mlabonne/FineTome-100k)). Gemma-3 renders multi-turn conversations like below:

```
<bos><start_of_turn>user
Hello!<end_of_turn>
<start_of_turn>model
Hey there!<end_of_turn>
```

We use our `get_chat_template` function to get the correct chat template. We support `zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, phi3, llama3, phi4, qwen2.5, gemma3` and more.
"""

from unsloth.chat_templates import get_chat_template
tokenizer = get_chat_template(
    tokenizer,
    chat_template = "gemma-3",
)
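
"""To see what the `gemma-3` template produces, here is a small illustrative check (an addition to this notebook, using the same message format as the inference cells further down). It should render the `<start_of_turn>` structure shown above."""

example_convo = [
    {"role": "user",      "content": [{"type": "text", "text": "Hello!"}]},
    {"role": "assistant", "content": [{"type": "text", "text": "Hey there!"}]},
]
# tokenize = False returns the raw template string instead of token ids
print(tokenizer.apply_chat_template(example_convo, tokenize = False))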

from datasets import load_dataset
dataset = load_dataset("NewEden/Light-Novels-Roleplay-Logs-Books-Oh-My-duplicate-turns-removed", split = "train")

"""We now use `standardize_data_formats` to try converting datasets to the correct format for finetuning purposes!"""

from unsloth.chat_templates import standardize_data_formats
dataset = standardize_data_formats(dataset)
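
"""`standardize_data_formats` normalizes ShareGPT-style turns (`{"from": "human", "value": ...}`) into the `{"role": ..., "content": ...}` layout the chat template expects. The small check below is an addition to the notebook and assumes the dataset keeps its `conversations` column:"""

first_turn = dataset[0]["conversations"][0]
print(first_turn.keys())   # expect something like dict_keys(['role', 'content'])
print(first_turn["role"])  # e.g. "user"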

"""Let's see what row 100 looks like!"""

dataset[100]

"""We now apply the `Gemma-3` chat template to the conversations and save the result to a `text` column."""

def apply_chat_template(examples):
    texts = tokenizer.apply_chat_template(examples["conversations"])
    return { "text" : texts }
pass
dataset = dataset.map(apply_chat_template, batched = True)
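
"""Since `max_seq_length` was set to 8192 when loading the model, it is worth checking how long the rendered conversations actually are. This check is an addition to the notebook; it samples up to 1,000 rows and tokenizes the `text` column with the same tokenizer."""

sample  = dataset.select(range(min(1000, len(dataset))))
lengths = [len(tokenizer(t).input_ids) for t in sample["text"]]
print(f"Sampled {len(lengths)} rows: max = {max(lengths)} tokens, "
      f"mean = {sum(lengths) / len(lengths):.0f} tokens (max_seq_length = 8192)")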

"""Let's see how the chat template did! Notice that `Gemma-3` adds a `<bos>` token by default!"""

dataset[100]["text"]

"""<a name="Train"></a>
### Train the model
Now let's use Huggingface TRL's `SFTTrainer`! More docs here: [TRL SFT docs](https://huggingface.co/docs/trl/sft_trainer). Here we train for 4 full epochs; for a quick smoke test, set `max_steps = 60` and drop `num_train_epochs` instead.
"""

from trl import SFTTrainer, SFTConfig
trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = dataset,
    eval_dataset = None, # Can set up evaluation!
    args = SFTConfig(
        dataset_text_field = "text",
        per_device_train_batch_size = 3,
        gradient_accumulation_steps = 6, # Use GA to mimic a larger batch size!
        warmup_steps = 50,
        num_train_epochs = 4, # Number of full passes over the dataset
        learning_rate = 1e-5, # Deliberately conservative; 2e-5 to 2e-4 is more typical for short LoRA runs
        max_grad_norm = 0.2,
        logging_steps = 1,
        optim = "paged_adamw_8bit",
        weight_decay = 0.01,
        lr_scheduler_type = "cosine",
        seed = 3407,
        report_to = "wandb", # Use "none" to disable logging, or "wandb" etc.
    ),
)
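
"""With these settings one optimizer step sees `per_device_train_batch_size * gradient_accumulation_steps = 3 * 6 = 18` sequences, which is the effective batch size on a single GPU."""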

"""We also use Unsloth's `train_on_responses_only` method to train only on the assistant outputs and ignore the loss on the user's inputs. This helps increase the accuracy of finetunes!"""

from unsloth.chat_templates import train_on_responses_only
trainer = train_on_responses_only(
    trainer,
    instruction_part = "<start_of_turn>user\n",
    response_part = "<start_of_turn>model\n",
)

"""Let's verify that the instruction part is masked. Let's print the 100th row again:"""

tokenizer.decode(trainer.train_dataset[100]["input_ids"])

"""Now let's print the masked-out example - you should see only the answer is present:"""

tokenizer.decode([tokenizer.pad_token_id if x == -100 else x for x in trainer.train_dataset[100]["labels"]]).replace(tokenizer.pad_token, " ")
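
"""As an extra check (added here), we can count how many label positions are ignored by the loss. `train_on_responses_only` marks user-turn tokens with -100, so a large share of each row should be masked:"""

labels     = trainer.train_dataset[100]["labels"]
num_masked = sum(1 for x in labels if x == -100)
print(f"{num_masked} of {len(labels)} label tokens are masked (only the model turns contribute to the loss).")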

# @title Show current memory stats
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")

"""Let's train the model! To resume a training run, use `trainer.train(resume_from_checkpoint = True)`."""

trainer_stats = trainer.train()

# @title Show final memory and time stats
used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
used_memory_for_lora = round(used_memory - start_gpu_memory, 3)
used_percentage = round(used_memory / max_memory * 100, 3)
lora_percentage = round(used_memory_for_lora / max_memory * 100, 3)
print(f"{trainer_stats.metrics['train_runtime']} seconds used for training.")
print(
    f"{round(trainer_stats.metrics['train_runtime']/60, 2)} minutes used for training."
)
print(f"Peak reserved memory = {used_memory} GB.")
print(f"Peak reserved memory for training = {used_memory_for_lora} GB.")
print(f"Peak reserved memory % of max memory = {used_percentage} %.")
print(f"Peak reserved memory for training % of max memory = {lora_percentage} %.")

"""<a name="Inference"></a>
### Inference
Let's run the model via Unsloth native inference! According to the `Gemma-3` team, the recommended settings for inference are `temperature = 1.0, top_p = 0.95, top_k = 64`.
"""

from unsloth.chat_templates import get_chat_template
tokenizer = get_chat_template(
    tokenizer,
    chat_template = "gemma-3",
)
messages = [{
    "role": "user",
    "content": [{
        "type" : "text",
        "text" : "Continue the sequence: 1, 1, 2, 3, 5, 8,",
    }]
}]
text = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt = True, # Must add for generation
)
outputs = model.generate(
    **tokenizer([text], return_tensors = "pt").to("cuda"),
    max_new_tokens = 64, # Increase for longer outputs!
    # Recommended Gemma-3 settings!
    temperature = 1.0, top_p = 0.95, top_k = 64,
)
tokenizer.batch_decode(outputs)

"""You can also use a `TextStreamer` for continuous inference - so you can see the generation token by token, instead of waiting the whole time!"""

messages = [{
    "role": "user",
    "content": [{"type" : "text", "text" : "Why is the sky blue?",}]
}]
text = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt = True, # Must add for generation
)

from transformers import TextStreamer
_ = model.generate(
    **tokenizer([text], return_tensors = "pt").to("cuda"),
    max_new_tokens = 64, # Increase for longer outputs!
    # Recommended Gemma-3 settings!
    temperature = 1.0, top_p = 0.95, top_k = 64,
    streamer = TextStreamer(tokenizer, skip_prompt = True),
)

"""<a name="Save"></a>
### Saving, loading finetuned models
To save the final model as LoRA adapters, either use Huggingface's `push_to_hub` for an online save or `save_pretrained` for a local save.

**[NOTE]** This ONLY saves the LoRA adapters, and not the full model. To save to 16bit or GGUF, scroll down!
"""

model.save_pretrained("gemma-3")  # Local saving
tokenizer.save_pretrained("gemma-3")
# model.push_to_hub("HF_ACCOUNT/gemma-3", token = "...") # Online saving
# tokenizer.push_to_hub("HF_ACCOUNT/gemma-3", token = "...") # Online saving

"""Now if you want to load the LoRA adapters we just saved for inference, change `False` to `True`:"""

if False:
    from unsloth import FastModel
    model, tokenizer = FastModel.from_pretrained(
        model_name = "lora_model", # YOUR MODEL YOU USED FOR TRAINING
        max_seq_length = 2048,
        load_in_4bit = True,
    )

messages = [{
    "role": "user",
    "content": [{"type" : "text", "text" : "What is Gemma-3?",}]
}]
text = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt = True, # Must add for generation
)

from transformers import TextStreamer
_ = model.generate(
    **tokenizer([text], return_tensors = "pt").to("cuda"),
    max_new_tokens = 64, # Increase for longer outputs!
    # Recommended Gemma-3 settings!
    temperature = 1.0, top_p = 0.95, top_k = 64,
    streamer = TextStreamer(tokenizer, skip_prompt = True),
)

"""### Saving to float16 for VLLM

We also support saving to `float16` directly for deployment! We save it in the folder `gemma-3-finetune`. Set `if False` to `if True` to let it run!
"""

if False: # Change to True to save finetune!
    model.save_pretrained_merged("gemma-3-finetune", tokenizer)
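
"""As a rough deployment sketch (an addition to this notebook, not Unsloth code), the merged `gemma-3-finetune` folder can be served with vLLM directly. The sampling values mirror the recommended Gemma-3 settings; adjust `max_model_len` to fit your GPU."""

if False: # Change to True after saving the merged model above
    from vllm import LLM, SamplingParams
    llm = LLM(model = "gemma-3-finetune", max_model_len = 8192)
    params = SamplingParams(temperature = 1.0, top_p = 0.95, top_k = 64, max_tokens = 64)
    # For chat-style prompts, render them with tokenizer.apply_chat_template first.
    print(llm.generate(["Why is the sky blue?"], params)[0].outputs[0].text)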

"""If you want to upload / push to your Hugging Face account, set `if False` to `if True` and add your Hugging Face token and upload location!"""

if False: # Change to True to upload finetune
    model.push_to_hub_merged(
        "HF_ACCOUNT/gemma-3-finetune", tokenizer,
        token = "hf_..."
    )

"""### GGUF / llama.cpp Conversion
To save to `GGUF` / `llama.cpp`, we support it natively now for all models! For now, you can convert easily to `Q8_0, F16 or BF16` precision. `Q4_K_M` for 4bit will come later!
"""

if False: # Change to True to save to GGUF
    model.save_pretrained_gguf(
        "gemma-3-finetune",
        quantization_type = "Q8_0", # For now only Q8_0, BF16, F16 supported
    )

"""Likewise, if you want to instead push the GGUF to your Hugging Face account, set `if False` to `if True` and add your Hugging Face token and upload location!"""

if False: # Change to True to upload GGUF
    model.push_to_hub_gguf(
        "gemma-3-finetune",
        quantization_type = "Q8_0", # Only Q8_0, BF16, F16 supported
        repo_id = "HF_ACCOUNT/gemma-finetune-gguf",
        token = "hf_...",
    )

"""Now, use the `gemma-3-finetune.gguf` file or `gemma-3-finetune-Q4_K_M.gguf` file in llama.cpp or a UI-based system like Jan or Open WebUI. You can install Jan [here](https://github.com/janhq/jan) and Open WebUI [here](https://github.com/open-webui/open-webui)."""
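
"""For a quick local smoke test with llama.cpp's CLI (an added example - substitute the GGUF filename that the export above actually writes):"""

# !llama-cli -m gemma-3-finetune/gemma-3-finetune.Q8_0.gguf \
#     -p "Why is the sky blue?" -n 64 --temp 1.0 --top-p 0.95 --top-k 64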

"""And we're done! If you have any questions on Unsloth, we have a [Discord](https://discord.gg/unsloth) channel! If you find any bugs, want to keep up to date with the latest LLM news, or need help joining projects, feel free to join our Discord!

Some other links:
1. Train your own reasoning model - Llama GRPO notebook [Free Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-GRPO.ipynb)
2. Saving finetunes to Ollama. [Free notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-Ollama.ipynb)
3. Llama 3.2 Vision finetuning - Radiography use case. [Free Colab](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb)
4. See notebooks for DPO, ORPO, Continued pretraining, conversational finetuning and more on our [documentation](https://docs.unsloth.ai/get-started/unsloth-notebooks)!

<div class="align-center">
<a href="https://unsloth.ai"><img src="https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png" width="115"></a>
<a href="https://discord.gg/unsloth"><img src="https://github.com/unslothai/unsloth/raw/main/images/Discord.png" width="145"></a>
<a href="https://docs.unsloth.ai/"><img src="https://github.com/unslothai/unsloth/blob/main/images/documentation%20green%20button.png?raw=true" width="125"></a>

Join Discord if you need help + ⭐️ <i>Star us on <a href="https://github.com/unslothai/unsloth">Github</a></i> ⭐️
</div>
"""