Commit 90312a8
Parent(s): bbe5ce0
Fix memory issues and update GPU requirements
- Add attn_implementation='eager' as per OpenAI cookbook
- Add proper device_map configuration for 120B model
- Add memory checks with helpful error messages
- Update GPU requirements in README (20B needs 40GB+)
- Add OOM error handling with clear guidance
- Note that MXFP4 is dequantized to bf16, doubling memory needs
Files changed:
- README.md +6 -4
- gpt_oss_transformers.py +41 -10
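In essence, the loading path now follows the OpenAI cookbook recipe. The sketch below condenses the `gpt_oss_transformers.py` change; the `openai/gpt-oss-*` Hub ids are assumed from the model names used in this repo, and `model_kwargs` stands in for the size-dependent device-map choice shown in the diff:

```python
import torch
from transformers import AutoModelForCausalLM

model_id = "openai/gpt-oss-20b"        # assumed Hub id; "openai/gpt-oss-120b" for the large model
model_kwargs = {"device_map": "auto"}  # the 120B path instead uses a tensor-parallel plan (see diff)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,   # MXFP4 weights are dequantized to bf16 on load
    attn_implementation="eager",  # cookbook recommendation; avoids flash-attention issues
    **model_kwargs,
)
model.eval()
```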
README.md
CHANGED
@@ -109,10 +109,12 @@ huggingface-cli job run --gpu-flavor a10g-small \
 
 | Model | GPU Flavor | Memory | Cost/Hour | Best For |
 |-------|------------|--------|-----------|----------|
-| `gpt-oss-20b` | `a10g-…
-| `gpt-oss-20b` | `…
-| `gpt-oss-120b` | `…
-| `gpt-oss-120b` | `…
+| `gpt-oss-20b` | `a10g-large` | 48GB | $2.50 | 20B model (needs ~40GB) |
+| `gpt-oss-20b` | `a100-large` | 80GB | $4.34 | 20B with headroom |
+| `gpt-oss-120b` | `4xa100` | 320GB | $17.36 | 120B model (needs ~240GB) |
+| `gpt-oss-120b` | `8xl40s` | 384GB | $23.50 | 120B maximum speed |
+
+**Note**: The MXFP4 quantization is dequantized to bf16 during loading, which doubles memory requirements.
 
 ## 🏃 Local Execution
 
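The ~40GB and ~240GB figures in the table follow directly from holding every parameter in bf16 (2 bytes each) once the MXFP4 weights are dequantized. A back-of-the-envelope sketch, counting weights only and ignoring activations and KV cache:

```python
# Weights-only VRAM estimate after MXFP4 -> bf16 dequantization (2 bytes per parameter).
# Real usage is higher once activations and the KV cache are included.
BYTES_PER_BF16_PARAM = 2

for name, params in [("gpt-oss-20b", 20e9), ("gpt-oss-120b", 120e9)]:
    weight_gib = params * BYTES_PER_BF16_PARAM / 1024**3
    print(f"{name}: ~{weight_gib:.0f} GiB of bf16 weights")

# gpt-oss-20b:  ~37 GiB  -> the ~40GB-class flavors above
# gpt-oss-120b: ~224 GiB -> the ~240GB multi-GPU flavors above
```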
gpt_oss_transformers.py
CHANGED
@@ -236,31 +236,62 @@ def main(
     if tokenizer.pad_token is None:
         tokenizer.pad_token = tokenizer.eos_token
 
-    # Model loading configuration
-    …
+    # Model loading configuration based on OpenAI cookbook
+    # For 20B model, standard auto device map works
+    # For 120B model, use tensor parallel planning
+    if "120b" in model_id:
+        model_kwargs = {
+            "device_map": {"tp_plan": "auto"},
+            "enable_expert_parallel": True,
+        }
+    else:
+        model_kwargs = {
+            "device_map": "auto",
+        }
 
     # Load model
     logger.info(f"Loading model: {model_id}")
     logger.info("This may take a few minutes for large models...")
     # Note: GPT OSS models are MXFP4 quantized out of the box
+    # The quantization will be dequantized to bf16 during loading
+    logger.info("Note: MXFP4 quantization will be dequantized to bf16 for inference")
+
+    # Check available GPU memory
+    if num_gpus > 0:
+        gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
+        if gpu_memory < 40 and "20b" in model_id.lower():
+            logger.warning(f"GPU has {gpu_memory:.1f}GB but 20B model needs ~40GB when dequantized")
+            logger.warning("Consider using --flavor a10g-large (48GB) or a100-large (80GB)")
 
     try:
         model = AutoModelForCausalLM.from_pretrained(
             model_id,
             torch_dtype=torch.bfloat16,
-            …
+            attn_implementation="eager",  # As per cookbook, avoid flash attention issues
+            **model_kwargs,
         )
         model.eval()
+    except torch.cuda.OutOfMemoryError as e:
+        logger.error(f"Out of memory error: {e}")
+        logger.error("\nThe GPT OSS models require significant memory:")
+        logger.error("- 20B model: ~40GB VRAM (use a10g-large or a100-large)")
+        logger.error("- 120B model: ~240GB VRAM (use 4xa100 or 8xl40s)")
+        sys.exit(1)
     except Exception as e:
         logger.error(f"Failed to load model: {e}")
-        logger.error("Trying with …
+        logger.error("Trying with fallback configuration...")
         # Fallback to simpler loading
-        …
+        try:
+            model = AutoModelForCausalLM.from_pretrained(
+                model_id,
+                torch_dtype="auto",
+                device_map="auto",
+                attn_implementation="eager",
+            )
+            model.eval()
+        except Exception as fallback_error:
+            logger.error(f"Fallback also failed: {fallback_error}")
+            sys.exit(1)
 
     # Generation configuration
     generation_config = GenerationConfig(
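As a quick sanity check of the new loading path without a 40GB GPU, the same pattern (bf16, eager attention, auto device map) can be exercised against a tiny public checkpoint. A hypothetical smoke test, assuming `transformers` and `accelerate` are installed and using `sshleifer/tiny-gpt2` purely as a stand-in model that is not part of this repo:

```python
# Hypothetical smoke test: runs the same from_pretrained options as the script
# on a tiny public checkpoint, so the code path can be verified on modest hardware.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tiny_id = "sshleifer/tiny-gpt2"  # stand-in model, not part of this repo
tokenizer = AutoTokenizer.from_pretrained(tiny_id)
model = AutoModelForCausalLM.from_pretrained(
    tiny_id,
    torch_dtype=torch.bfloat16,
    attn_implementation="eager",
    device_map="auto",
)
model.eval()

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(out[0]))  # output is gibberish (tiny random-ish weights); we only care that it runs
```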