Commit 89351df
Parent(s): bb33774
pin xet

gpt_oss_transformers.py  CHANGED  (+143 -97)
Before:

@@ -3,40 +3,42 @@
 # dependencies = [
 #     "datasets",
 #     "huggingface-hub[hf_transfer]",
 #     "torch",
-#     "transformers>=4.
 #     "tqdm",
 #     "accelerate",
-#     "kernels>=0.9.0",  # For Flash Attention 3 support (optional but recommended)
 # ]
 # ///
 """
-Generate responses with transparent reasoning using OpenAI's

-This implementation
-The models

 Example usage:
     # Generate haiku with reasoning
     uv run gpt_oss_transformers.py \\
         --input-dataset davanstrien/haiku_dpo \\
         --output-dataset username/haiku-reasoning \\
         --prompt-column question

-    #
-    uv run gpt_oss_transformers.py \\
-        --input-dataset username/prompts \\
-        --output-dataset username/responses-with-reasoning \\
-        --prompt-column prompt \\
-        --reasoning-level high \\
-        --max-samples 100
-
-    # HF Jobs execution
     hf jobs uv run --flavor a10g-small \\
         https://huggingface.co/datasets/uv-scripts/openai-oss/raw/main/gpt_oss_transformers.py \\
-        --input-dataset
-        --output-dataset username/
 """

 import argparse
@@ -89,34 +91,79 @@ def parse_channels(raw_output: str) -> Dict[str, str]:
     """
     Extract think/content from GPT OSS channel-based output.

     """

-    # Extract
-        r"<\|start\|>assistant<\|channel\|>analysis<\|message\|>(.*?)<\|end\|>"
-    )
-    analysis_match = re.search(analysis_pattern, raw_output, re.DOTALL)
-    if analysis_match:
-        think = analysis_match.group(1).strip()

-    #

     # If no channels found, treat entire output as content
-    if not think and not content:
-        content = raw_output.strip()

-    return


 def create_dataset_card(
@@ -227,10 +274,11 @@ def main(
         logger.info("HuggingFace token found, authenticating...")
         login(token=HF_TOKEN)

-    # Load tokenizer
     logger.info(f"Loading tokenizer: {model_id}")
     tokenizer = AutoTokenizer.from_pretrained(
-        model_id,
     )

     # Add padding token if needed
@@ -252,58 +300,49 @@

     # Load model
     logger.info(f"Loading model: {model_id}")
-    logger.info("
-    #
-    logger.info("Note: MXFP4

     # Check available GPU memory
     if num_gpus > 0:
         gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
         if gpu_memory < 40 and "20b" in model_id.lower():
-            logger.

     try:
-        #
         model = AutoModelForCausalLM.from_pretrained(
             model_id,
-            torch_dtype=torch.bfloat16,
-            attn_implementation="kernels-community/vllm-flash-attn3"
             **model_kwargs,
         )
         model.eval()
-        logger.info("Successfully loaded
     except torch.cuda.OutOfMemoryError as e:
         logger.error(f"Out of memory error: {e}")
-        logger.error("\
-        logger.error("- 20B model: ~40GB VRAM (use
-        logger.error("- 120B model: ~240GB VRAM (use
         sys.exit(1)
     except Exception as e:
-        logger.info("Using eager attention instead (standard implementation)")
-        try:
-            model = AutoModelForCausalLM.from_pretrained(
-                model_id,
-                torch_dtype=torch.bfloat16,
-                attn_implementation="eager",  # Fallback to eager attention
-                **model_kwargs,
-            )
-            model.eval()
-            logger.info("Successfully loaded with eager attention")
-        except torch.cuda.OutOfMemoryError as oom_error:
-            logger.error(f"Out of memory with eager attention: {oom_error}")
-            logger.error("The model requires more GPU memory than available")
-            sys.exit(1)
-        except Exception as eager_error:
-            logger.error(f"Failed with eager attention: {eager_error}")
-            sys.exit(1)
-        else:
-            logger.error(f"Unexpected error loading model: {e}")
-            sys.exit(1)

     # Generation configuration
     generation_config = GenerationConfig(
@@ -336,29 +375,36 @@ def main(
     prompts = []
     original_prompts = []

     for example in tqdm(dataset, desc="Preparing prompts"):
         prompt_text = example[prompt_column]
         original_prompts.append(prompt_text)

-        # Create
-        messages = [

         prompts.append(prompt)

     # Generate responses in batches
After:

@@ -3,40 +3,42 @@
 # dependencies = [
 #     "datasets",
 #     "huggingface-hub[hf_transfer]",
+#     "hf-xet >= 1.1.7",
 #     "torch",
+#     "transformers>=4.55.0",
 #     "tqdm",
 #     "accelerate",
 # ]
 # ///
 """
+Generate responses with transparent reasoning using OpenAI's GPT OSS models.
+
+This implementation works on regular GPUs (L4, A100, A10G, T4) without requiring H100s.
+The models automatically dequantize MXFP4 to bf16 when needed, making them accessible
+on standard datacenter hardware.
+
+Key features:
+- Works on regular GPUs without special hardware
+- Extracts reasoning from analysis/commentary channels
+- Handles the simplified channel output format
+- No Flash Attention 3 or special kernels needed

 Example usage:
+    # Quick test with a single prompt
+    uv run gpt_oss_transformers.py --prompt "Write a haiku about mountains"
+
     # Generate haiku with reasoning
     uv run gpt_oss_transformers.py \\
         --input-dataset davanstrien/haiku_dpo \\
         --output-dataset username/haiku-reasoning \\
         --prompt-column question

+    # HF Jobs execution (A10G for $1.50/hr)
     hf jobs uv run --flavor a10g-small \\
         https://huggingface.co/datasets/uv-scripts/openai-oss/raw/main/gpt_oss_transformers.py \\
+        --input-dataset davanstrien/haiku_dpo \\
+        --output-dataset username/haiku-reasoning \\
+        --prompt-column question
 """

 import argparse
@@ -89,34 +91,79 @@ def parse_channels(raw_output: str) -> Dict[str, str]:
     """
     Extract think/content from GPT OSS channel-based output.

+    The actual output format is simpler than expected:
+        analysisREASONING_TEXTassistantfinalRESPONSE_TEXT
+
+    Sometimes includes commentary channel:
+        commentaryMETA_TEXTanalysisREASONING_TEXTassistantfinalRESPONSE_TEXT
     """
+    result = {"think": "", "content": "", "raw_output": raw_output}
+
+    # Clean up the text - remove system prompt if present
+    if "user" in raw_output:
+        # Take everything after the last user prompt
+        parts = raw_output.split("user")
+        if len(parts) > 1:
+            text = parts[-1]
+            # Find where the assistant response starts
+            for marker in ["analysis", "commentary", "assistant"]:
+                if marker in text:
+                    idx = text.find(marker)
+                    if idx > 0:
+                        text = text[idx:]
+                    raw_output = text
+                    break
+    else:
+        text = raw_output

+    # Extract reasoning (analysis and/or commentary)
+    reasoning_parts = []

+    # Try to extract analysis
+    if "analysis" in text:
+        match = re.search(
+            r"analysis(.*?)(?:commentary|assistantfinal|final|$)", text, re.DOTALL
+        )
+        if match:
+            reasoning_parts.append(("Analysis", match.group(1).strip()))
+
+    # Try to extract commentary
+    if "commentary" in text:
+        match = re.search(
+            r"commentary(.*?)(?:analysis|assistantfinal|final|$)", text, re.DOTALL
+        )
+        if match:
+            reasoning_parts.append(("Commentary", match.group(1).strip()))
+
+    # Combine reasoning
+    if reasoning_parts:
+        result["think"] = "\n\n".join(
+            f"[{label}] {content}" for label, content in reasoning_parts
+        )
+
+    # Extract final response
+    if "assistantfinal" in text:
+        parts = text.split("assistantfinal")
+        if len(parts) > 1:
+            result["content"] = parts[-1].strip()
+    elif "final" in text:
+        # Fallback - look for "final" keyword
+        parts = text.split("final")
+        if len(parts) > 1:
+            result["content"] = parts[-1].strip()
+
+    # Clean up any remaining tokens
+    for key in ["think", "content"]:
+        result[key] = result[key].replace("<|end|>", "").replace("<|return|>", "")
+        result[key] = (
+            result[key].replace("<|message|>", "").replace("assistant", "").strip()
+        )

     # If no channels found, treat entire output as content
+    if not result["think"] and not result["content"]:
+        result["content"] = raw_output.strip()

+    return result


 def create_dataset_card(
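For a quick sanity check of the parser above, a minimal sketch (it assumes parse_channels() and its re import are already defined in the session, e.g. by pasting the script above; the raw string is a made-up example of the simplified channel format):

    # Hypothetical raw generation in the simplified channel format from the docstring.
    raw = "analysisNeed a haiku: 5-7-5 syllables, mountain imagery.assistantfinalStone peaks hold the light"

    parsed = parse_channels(raw)
    print(parsed["think"])    # [Analysis] Need a haiku: 5-7-5 syllables, mountain imagery.
    print(parsed["content"])  # Stone peaks hold the light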
@@ -227,10 +274,11 @@ def main(
         logger.info("HuggingFace token found, authenticating...")
         login(token=HF_TOKEN)

+    # Load tokenizer (always use padding_side="left" for generation)
     logger.info(f"Loading tokenizer: {model_id}")
     tokenizer = AutoTokenizer.from_pretrained(
+        model_id,
+        padding_side="left",  # Always use left padding for generation
     )

     # Add padding token if needed
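Left padding matters here because decoder-only models continue generation from the last token of each row; if shorter prompts were padded on the right, the model would be asked to continue from pad tokens. A minimal sketch (assuming the tokenizer loaded above, with its pad token already set):

    # With padding_side="left", shorter prompts are padded at the front,
    # so every row in the batch ends on real prompt tokens.
    batch = tokenizer(
        ["Write a haiku", "Write a haiku about mountains in winter"],
        padding=True,
        return_tensors="pt",
    )
    # batch["input_ids"][0] begins with pad token ids; the prompt sits at the right edge.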
@@ -252,58 +300,49 @@

     # Load model
     logger.info(f"Loading model: {model_id}")
+    logger.info("Using standard configuration (no Flash Attention 3 needed)")
+
+    # Note about MXFP4
+    logger.info("Note: MXFP4 will auto-dequantize to bf16 on non-Hopper GPUs")
+
     # Check available GPU memory
     if num_gpus > 0:
         gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
         if gpu_memory < 40 and "20b" in model_id.lower():
+            logger.info(
+                f"GPU has {gpu_memory:.1f}GB. 20B model needs ~40GB when dequantized"
+            )
+            logger.info("Model will still load but may use CPU offloading if needed")

     try:
+        # Load with standard configuration (no Flash Attention 3)
+        # This works on L4, A100, A10G, T4 GPUs
         model = AutoModelForCausalLM.from_pretrained(
             model_id,
+            torch_dtype=torch.bfloat16,  # Can also use "auto"
+            # DO NOT USE: attn_implementation="kernels-community/vllm-flash-attn3"
             **model_kwargs,
         )
         model.eval()
+        logger.info("Successfully loaded model")
+
+        # Report memory usage
+        if torch.cuda.is_available():
+            memory_gb = torch.cuda.memory_allocated() / 1024**3
+            logger.info(f"GPU memory used: {memory_gb:.1f}GB")
+
     except torch.cuda.OutOfMemoryError as e:
         logger.error(f"Out of memory error: {e}")
+        logger.error("\nMemory requirements:")
+        logger.error("- 20B model: ~40GB VRAM (use A100-40GB or 2xL4)")
+        logger.error("- 120B model: ~240GB VRAM (use 4xA100-80GB)")
+        logger.error("\nFor HF Jobs, try:")
+        logger.error("- 20B: --flavor a10g-large or a100-large")
+        logger.error("- 120B: --flavor 4xa100")
         sys.exit(1)
     except Exception as e:
+        logger.error(f"Error loading model: {e}")
+        sys.exit(1)

     # Generation configuration
     generation_config = GenerationConfig(
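The same load, pulled out of the script as a standalone sketch; the model id and device_map are assumptions for illustration, not taken from this diff, and the script's model_kwargs may differ:

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_id = "openai/gpt-oss-20b"  # assumed checkpoint name for the 20B model
    tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,  # MXFP4 weights dequantize to bf16 on non-Hopper GPUs
        device_map="auto",           # assumption: let accelerate place/offload layers
    )
    model.eval()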
@@ -336,29 +375,36 @@ def main(
     prompts = []
     original_prompts = []

+    # Get current date for system prompt
+    from datetime import datetime
+
+    current_date = datetime.now().strftime("%Y-%m-%d")
+
     for example in tqdm(dataset, desc="Preparing prompts"):
         prompt_text = example[prompt_column]
         original_prompts.append(prompt_text)

+        # Create messages with reasoning level in system prompt
+        messages = [
+            {
+                "role": "system",
+                "content": f"""You are ChatGPT, a large language model trained by OpenAI.
+Knowledge cutoff: 2024-06
+Current date: {current_date}
+
+Reasoning: {reasoning_level}
+
+# Valid channels: analysis, commentary, final. Channel must be included for every message.""",
+            },
+            {"role": "user", "content": prompt_text},
+        ]
+
+        # Apply chat template
+        prompt = tokenizer.apply_chat_template(
+            messages,
+            add_generation_prompt=True,
+            tokenize=False,
+        )
         prompts.append(prompt)

     # Generate responses in batches