Commit d13896f
1 parent: 11742a6
Add

Files changed:
- aliases.py +7 -0
- checkpoint.py +2022 -0
- config.json +130 -16
- config_molmoe.py +907 -88
- modeling_molmoe.py +0 -0
- pytorch_model.bin +2 -2
- util.py +785 -0
aliases.py
ADDED
@@ -0,0 +1,7 @@
from os import PathLike
from typing import Union

__all__ = ["PathOrStr"]


PathOrStr = Union[str, PathLike]
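Note (not part of this commit): `PathOrStr` is the path-or-URL type alias that the new checkpointing module below accepts everywhere a checkpoint location is expected. The following is a rough, hypothetical usage sketch of the helpers added in checkpoint.py, assuming an already-constructed FSDP-wrapped model and its optimizer, and assuming the module is importable as `olmo.checkpoint`; the directory and bucket names are placeholders.

    # Hypothetical usage sketch, assuming `fsdp_model` (FSDP-wrapped) and `optim` already exist.
    from olmo.checkpoint import (
        save_fsdp_model_and_optim_state,
        load_fsdp_model_and_optim_state,
    )

    # Save a sharded model + optimizer checkpoint from every rank,
    # optionally mirroring the files to a remote "directory".
    save_fsdp_model_and_optim_state(
        "/tmp/step1000",                       # placeholder local checkpoint dir
        fsdp_model,
        optim,
        upload_to="s3://my-bucket/step1000",   # placeholder bucket
        save_overwrite=True,
    )

    # Later, restore the same checkpoint in place on all ranks.
    load_fsdp_model_and_optim_state("/tmp/step1000", fsdp_model, optim)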
checkpoint.py
ADDED
@@ -0,0 +1,2022 @@
| 1 |
+
import gc
|
| 2 |
+
import io
|
| 3 |
+
import logging
|
| 4 |
+
import pickle
|
| 5 |
+
import shutil
|
| 6 |
+
import traceback
|
| 7 |
+
from abc import ABCMeta, abstractmethod
|
| 8 |
+
from collections import defaultdict
|
| 9 |
+
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed
|
| 10 |
+
from contextlib import contextmanager
|
| 11 |
+
from copy import deepcopy
|
| 12 |
+
from dataclasses import dataclass, field, replace
|
| 13 |
+
from functools import reduce
|
| 14 |
+
from multiprocessing import shared_memory
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
from typing import Any, Dict, Generator, List, Optional, Set, Tuple, cast
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import torch
|
| 20 |
+
import torch.distributed.checkpoint as dist_cp
|
| 21 |
+
import torch.multiprocessing as mp
|
| 22 |
+
import torch.nn as nn
|
| 23 |
+
from packaging import version
|
| 24 |
+
from torch.distributed import _remote_device
|
| 25 |
+
from torch.distributed._shard._utils import narrow_tensor_by_index
|
| 26 |
+
from torch.distributed._shard.metadata import ShardMetadata
|
| 27 |
+
from torch.distributed._shard.sharded_tensor import ShardedTensor
|
| 28 |
+
from torch.distributed.checkpoint.filesystem import WriteResult, _StorageInfo
|
| 29 |
+
from torch.distributed.checkpoint.metadata import Metadata, MetadataIndex
|
| 30 |
+
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
|
| 31 |
+
from torch.distributed.checkpoint.planner import LoadItemType, ReadItem
|
| 32 |
+
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
|
| 33 |
+
from torch.distributed.fsdp import StateDictType
|
| 34 |
+
from torch.distributed.fsdp.api import (
|
| 35 |
+
FullOptimStateDictConfig,
|
| 36 |
+
FullStateDictConfig,
|
| 37 |
+
ShardedOptimStateDictConfig,
|
| 38 |
+
ShardedStateDictConfig,
|
| 39 |
+
)
|
| 40 |
+
from torch.futures import Future
|
| 41 |
+
from torch.nn.parallel import DistributedDataParallel as DDP
|
| 42 |
+
|
| 43 |
+
try:
|
| 44 |
+
from torch.distributed.fsdp.flat_param import FlatParamHandle # type: ignore
|
| 45 |
+
except ModuleNotFoundError:
|
| 46 |
+
from torch.distributed.fsdp._flat_param import FlatParamHandle # type: ignore
|
| 47 |
+
|
| 48 |
+
from olmo import util
|
| 49 |
+
|
| 50 |
+
from .aliases import PathOrStr
|
| 51 |
+
from .config import BaseConfig, ShardedCheckpointerType, TrainConfig
|
| 52 |
+
from .exceptions import OLMoCheckpointError
|
| 53 |
+
from .optim import Optimizer, fix_optim_state_dict
|
| 54 |
+
from .safetensors_util import safetensors_file_to_state_dict
|
| 55 |
+
from .torch_util import (
|
| 56 |
+
barrier,
|
| 57 |
+
gc_cuda,
|
| 58 |
+
get_fs_local_rank,
|
| 59 |
+
get_global_rank,
|
| 60 |
+
get_local_rank,
|
| 61 |
+
get_local_world_size,
|
| 62 |
+
get_world_size,
|
| 63 |
+
)
|
| 64 |
+
from .util import (
|
| 65 |
+
_get_s3_client,
|
| 66 |
+
default_thread_count,
|
| 67 |
+
dir_is_empty,
|
| 68 |
+
get_bytes_range,
|
| 69 |
+
get_progress_bar,
|
| 70 |
+
resource_path,
|
| 71 |
+
upload,
|
| 72 |
+
wait_for,
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
__all__ = [
|
| 76 |
+
"save_fsdp_model_and_optim_state",
|
| 77 |
+
"load_fsdp_model_and_optim_state",
|
| 78 |
+
"load_fsdp_optim_state",
|
| 79 |
+
"save_state_dict",
|
| 80 |
+
"load_state_dict",
|
| 81 |
+
"load_model_state",
|
| 82 |
+
"RemoteFileSystemWriter",
|
| 83 |
+
"RemoteFileSystemReader",
|
| 84 |
+
"Checkpointer",
|
| 85 |
+
"FullCheckpointer",
|
| 86 |
+
"TorchNewStyleShardedCheckpointer",
|
| 87 |
+
"TorchLegacyShardedCheckpointer",
|
| 88 |
+
"LocalShardedCheckpointer",
|
| 89 |
+
"build_sharded_checkpointer",
|
| 90 |
+
]
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
log = logging.getLogger(__name__)
|
| 94 |
+
|
| 95 |
+
MODEL_AND_OPTIM_FOLDER = "model_and_optim"
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def save_fsdp_model_and_optim_state(
|
| 99 |
+
checkpoint_dir: PathOrStr,
|
| 100 |
+
fsdp_model: FSDP,
|
| 101 |
+
optim: Optimizer,
|
| 102 |
+
*,
|
| 103 |
+
upload_to: Optional[str] = None,
|
| 104 |
+
save_overwrite: bool = False,
|
| 105 |
+
):
|
| 106 |
+
"""
|
| 107 |
+
Use this to save a state dict for an FSDP model and its optimizer via :module:`torch.distributed.checkpoint`
|
| 108 |
+
functions. This should be used during distributed training and should be called by all ranks.
|
| 109 |
+
|
| 110 |
+
:param checkpoint_dir: The directory to save to.
|
| 111 |
+
:param fsdp_model: The FSDP model.
|
| 112 |
+
:param optim: The FSDP model's optimizer.
|
| 113 |
+
:param upload_to: Optional, a remote "directory" to upload the checkpoint files to.
|
| 114 |
+
:param save_overwrite: Overwrite existing files.
|
| 115 |
+
|
| 116 |
+
:raises FileExistsError: If a model and optim checkpoint already exists in ``checkpoint_dir`` and ``save_overwrite=False``.
|
| 117 |
+
"""
|
| 118 |
+
checkpoint_dir = Path(checkpoint_dir)
|
| 119 |
+
target_dir = checkpoint_dir / MODEL_AND_OPTIM_FOLDER
|
| 120 |
+
if save_overwrite:
|
| 121 |
+
if get_fs_local_rank() == 0:
|
| 122 |
+
shutil.rmtree(target_dir, ignore_errors=True)
|
| 123 |
+
elif not dir_is_empty(target_dir):
|
| 124 |
+
raise FileExistsError(target_dir)
|
| 125 |
+
barrier()
|
| 126 |
+
if get_fs_local_rank() == 0:
|
| 127 |
+
target_dir.mkdir(exist_ok=True, parents=True)
|
| 128 |
+
barrier()
|
| 129 |
+
with FSDP.state_dict_type(
|
| 130 |
+
fsdp_model,
|
| 131 |
+
state_dict_type=StateDictType.SHARDED_STATE_DICT,
|
| 132 |
+
state_dict_config=ShardedStateDictConfig(offload_to_cpu=True),
|
| 133 |
+
optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True),
|
| 134 |
+
):
|
| 135 |
+
model_and_optim_state = {
|
| 136 |
+
"model": fsdp_model.state_dict(),
|
| 137 |
+
"optim": FSDP.optim_state_dict(fsdp_model, optim),
|
| 138 |
+
}
|
| 139 |
+
dist_cp.save_state_dict(
|
| 140 |
+
model_and_optim_state,
|
| 141 |
+
RemoteFileSystemWriter(
|
| 142 |
+
target_dir,
|
| 143 |
+
upload_to=None if upload_to is None else f"{upload_to.rstrip('/')}/{MODEL_AND_OPTIM_FOLDER}",
|
| 144 |
+
save_overwrite=save_overwrite,
|
| 145 |
+
),
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def load_fsdp_model_and_optim_state(
|
| 150 |
+
checkpoint_dir: PathOrStr,
|
| 151 |
+
fsdp_model: FSDP,
|
| 152 |
+
optim: Optimizer,
|
| 153 |
+
*,
|
| 154 |
+
local_cache: Optional[PathOrStr] = None,
|
| 155 |
+
load_optimizer_state: bool = True,
|
| 156 |
+
):
|
| 157 |
+
"""
|
| 158 |
+
Use this to load a state dict for an FSDP model and its optimizer via :module:`torch.distributed.checkpoint`
|
| 159 |
+
functions. This should be used during distributed training and should be called by all ranks.
|
| 160 |
+
|
| 161 |
+
:param checkpoint_dir: The checkpoint directory to load from. This can be a local or remote directory.
|
| 162 |
+
:param fsdp_model: The FSDP model.
|
| 163 |
+
:param optim: The FSDP model's optimizer.
|
| 164 |
+
:param local_cache: A local cache of the checkpoint directory. Use this when the ``checkpoint_dir`` is a
|
| 165 |
+
remote "directory" but there might be a cached version of the same artifacts.
|
| 166 |
+
:param load_optimizer_state: Set to ``False`` to skip loading the optimizer state.
|
| 167 |
+
|
| 168 |
+
:raises FileNotFoundError: If the ``checkpoint_dir`` doesn't contain a model and optimizer checkpoint.
|
| 169 |
+
"""
|
| 170 |
+
load_path = str(checkpoint_dir).rstrip("/")
|
| 171 |
+
local_cache = None if local_cache is None else Path(local_cache)
|
| 172 |
+
with FSDP.state_dict_type(
|
| 173 |
+
fsdp_model,
|
| 174 |
+
state_dict_type=StateDictType.SHARDED_STATE_DICT,
|
| 175 |
+
state_dict_config=ShardedStateDictConfig(offload_to_cpu=True),
|
| 176 |
+
optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True),
|
| 177 |
+
):
|
| 178 |
+
# Load the model state dict in place.
|
| 179 |
+
log.info("Loading model state...")
|
| 180 |
+
model_state = {"model": fsdp_model.state_dict()}
|
| 181 |
+
dist_cp.load_state_dict(
|
| 182 |
+
model_state,
|
| 183 |
+
RemoteFileSystemReader(
|
| 184 |
+
f"{load_path}/{MODEL_AND_OPTIM_FOLDER}",
|
| 185 |
+
local_cache=None if local_cache is None else local_cache / MODEL_AND_OPTIM_FOLDER,
|
| 186 |
+
),
|
| 187 |
+
)
|
| 188 |
+
fsdp_model.load_state_dict(model_state["model"])
|
| 189 |
+
|
| 190 |
+
if not load_optimizer_state:
|
| 191 |
+
return
|
| 192 |
+
|
| 193 |
+
# Load optim state dict in place.
|
| 194 |
+
log.info("Loading sharded optimizer state...")
|
| 195 |
+
optim_state = load_sharded_optimizer_state_dict(
|
| 196 |
+
model_state_dict=model_state["model"],
|
| 197 |
+
optimizer_key="optim",
|
| 198 |
+
storage_reader=RemoteFileSystemReader(
|
| 199 |
+
f"{load_path}/{MODEL_AND_OPTIM_FOLDER}",
|
| 200 |
+
local_cache=None if local_cache is None else local_cache / MODEL_AND_OPTIM_FOLDER,
|
| 201 |
+
),
|
| 202 |
+
)
|
| 203 |
+
# optim_state["optim"] = {
|
| 204 |
+
# 'state': { fqn: { 'grad_norm_exp_avg': Tensor, 'step': Tensor, 'exp_avg': ShardedTensor, 'exp_avg_sq': ShardedTensor } },
|
| 205 |
+
# 'param_groups': [{ 'param_names': [ fsdp_fqn, ... ], 'params': [ fqn, ... ], ... }],
|
| 206 |
+
# }
|
| 207 |
+
del model_state
|
| 208 |
+
|
| 209 |
+
# Make sure tensors are on CPU! PyTorch puts them on GPU even though we have `offload_to_cpu=True`.
|
| 210 |
+
for state in optim_state["optim"]["state"].values():
|
| 211 |
+
for k in state.keys():
|
| 212 |
+
state[k] = state[k].cpu()
|
| 213 |
+
gc_cuda()
|
| 214 |
+
|
| 215 |
+
load_fsdp_optim_state(fsdp_model, optim, optim_state["optim"])
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def load_fsdp_optim_state(fsdp_model: FSDP, optim: Optimizer, optim_state: Dict[str, Any]):
|
| 219 |
+
log.info("Flattening sharded optimizer state...")
|
| 220 |
+
# flattened_osd = {
|
| 221 |
+
# 'state': { id: { 'grad_norm_exp_avg': Tensor, 'step': Tensor, 'exp_avg': Tensor, 'exp_avg_sq': Tensor } },
|
| 222 |
+
# 'param_groups': [{ 'param_names': [ fsdp_fqn, ... ], 'params': [ id, ... ], ... }],
|
| 223 |
+
# }
|
| 224 |
+
# NOTE: Careful! The order of the these arguments has changed from 2.0 to 2.1... ¯\_(ツ)_/¯
|
| 225 |
+
if version.parse(torch.__version__) < version.parse("2.1.0"):
|
| 226 |
+
flattened_osd = FSDP.optim_state_dict_to_load(optim_state, fsdp_model, optim) # type: ignore
|
| 227 |
+
else:
|
| 228 |
+
flattened_osd = FSDP.optim_state_dict_to_load(fsdp_model, optim, optim_state) # type: ignore
|
| 229 |
+
|
| 230 |
+
del optim_state
|
| 231 |
+
gc_cuda()
|
| 232 |
+
|
| 233 |
+
log.info("Loading flattened optimizer state...")
|
| 234 |
+
|
| 235 |
+
# Put optim state on CPU since `Optimizer.load_state_dict()` will create a deepcopy of the whole state dict,
|
| 236 |
+
# which takes up unnecessary GPU memory.
|
| 237 |
+
for state in flattened_osd["state"].values():
|
| 238 |
+
for k in state.keys():
|
| 239 |
+
state[k] = state[k].cpu()
|
| 240 |
+
gc_cuda()
|
| 241 |
+
|
| 242 |
+
optim.load_state_dict(fix_optim_state_dict(optim, flattened_osd))
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def save_state_dict(
|
| 246 |
+
checkpoint_dir: PathOrStr,
|
| 247 |
+
fname: str,
|
| 248 |
+
state_dict: Dict[str, Any],
|
| 249 |
+
*,
|
| 250 |
+
upload_to: Optional[str] = None,
|
| 251 |
+
save_overwrite: bool = False,
|
| 252 |
+
synchronize: bool = True,
|
| 253 |
+
):
|
| 254 |
+
"""
|
| 255 |
+
Save a regular state dict to the file ``fname`` within ``checkpoint_dir`` using :func:`torch.save()`.
|
| 256 |
+
This can be used during distributed training or not. If during distributed training the ``fname`` should be unique
|
| 257 |
+
for each rank.
|
| 258 |
+
|
| 259 |
+
:param checkpoint_dir: The directory to save to.
|
| 260 |
+
:param fname: The target file within ``checkpoint_dir`` to save to. This should be a path relative to the ``checkpoint_dir``.
|
| 261 |
+
:param state_dict: The state dict to save.
|
| 262 |
+
:param upload_to: Optional, a remote "directory" to upload the file to.
|
| 263 |
+
:param save_overwrite: Overwrite existing files.
|
| 264 |
+
:param synchronize: If ``False``, don't do any distributed synchronization. Use this when only calling
|
| 265 |
+
this function from a single rank.
|
| 266 |
+
|
| 267 |
+
:raises FileExistsError: If the ``fname`` already exists within ``checkpoint_dir`` and ``save_overwrite=False``.
|
| 268 |
+
"""
|
| 269 |
+
checkpoint_dir = Path(checkpoint_dir)
|
| 270 |
+
target_path = checkpoint_dir / fname
|
| 271 |
+
if save_overwrite:
|
| 272 |
+
target_path.unlink(missing_ok=True)
|
| 273 |
+
elif target_path.is_file():
|
| 274 |
+
raise FileExistsError(target_path)
|
| 275 |
+
if synchronize:
|
| 276 |
+
barrier()
|
| 277 |
+
target_path.parent.mkdir(exist_ok=True, parents=True)
|
| 278 |
+
if synchronize:
|
| 279 |
+
barrier()
|
| 280 |
+
torch.save(state_dict, target_path)
|
| 281 |
+
if upload_to is not None:
|
| 282 |
+
upload_target = f"{upload_to.rstrip('/')}/{fname}"
|
| 283 |
+
log.info(f"Uploading {target_path} to {upload_target}...")
|
| 284 |
+
upload(target_path, upload_target, save_overwrite=save_overwrite)
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def load_state_dict(
|
| 288 |
+
checkpoint_dir: PathOrStr,
|
| 289 |
+
fname: str,
|
| 290 |
+
*,
|
| 291 |
+
local_cache: Optional[PathOrStr] = None,
|
| 292 |
+
map_location: Optional[str] = None,
|
| 293 |
+
):
|
| 294 |
+
"""
|
| 295 |
+
Load a regular state dict from the file ``fname`` within ``checkpoint_dir`` using :func:`torch.load()`.
|
| 296 |
+
This can be used during distributed training or not.
|
| 297 |
+
|
| 298 |
+
:param checkpoint_dir: A local or remote checkpoint directory.
|
| 299 |
+
:param fname: The target file within the ``checkpoint_dir``. This should be a path relative to the ``checkpoint_dir``.
|
| 300 |
+
:param local_cache: A local cache of the checkpoint directory. Use this when the ``checkpoint_dir`` is a
|
| 301 |
+
remote "directory" but there might be a cached version of the same artifacts.
|
| 302 |
+
|
| 303 |
+
:raises FileNotFoundError: If ``fname`` doesn't exist in the ``checkpoint_dir`` or the local cache.
|
| 304 |
+
"""
|
| 305 |
+
if fname.endswith(".pt"):
|
| 306 |
+
# Try safetensors version first.
|
| 307 |
+
try:
|
| 308 |
+
path = resource_path(
|
| 309 |
+
str(checkpoint_dir).rstrip("/"), fname[:-2] + "safetensors", local_cache=local_cache
|
| 310 |
+
)
|
| 311 |
+
return safetensors_file_to_state_dict(path, map_location=map_location)
|
| 312 |
+
except FileNotFoundError:
|
| 313 |
+
pass
|
| 314 |
+
|
| 315 |
+
path = resource_path(str(checkpoint_dir).rstrip("/"), fname, local_cache=local_cache)
|
| 316 |
+
return torch.load(path, map_location=map_location)
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def load_model_state(checkpoint_dir: PathOrStr, model: torch.nn.Module):
|
| 320 |
+
"""
|
| 321 |
+
Load model state from a distributed FSDP model checkpoint created from :func:`save_fsdp_model_and_optim_state()`.
|
| 322 |
+
Note that ``model`` should not be wrapped with FSDP.
|
| 323 |
+
"""
|
| 324 |
+
state_dict = {"model": model.state_dict()}
|
| 325 |
+
dist_cp.load_state_dict(
|
| 326 |
+
state_dict,
|
| 327 |
+
RemoteFileSystemReader(f"{str(checkpoint_dir).rstrip('/')}/{MODEL_AND_OPTIM_FOLDER}"),
|
| 328 |
+
no_dist=True,
|
| 329 |
+
)
|
| 330 |
+
model.load_state_dict(state_dict["model"])
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
class RemoteFileSystemWriter(dist_cp.FileSystemWriter):
|
| 334 |
+
"""
|
| 335 |
+
A subclass of :class:`~torch.distributed.checkpoint.FileSystemWriter` that can upload files
|
| 336 |
+
directly to a cloud bucket when ``upload_to`` is specified.
|
| 337 |
+
"""
|
| 338 |
+
|
| 339 |
+
def __init__(
|
| 340 |
+
self,
|
| 341 |
+
path: PathOrStr,
|
| 342 |
+
single_file_per_rank: bool = True,
|
| 343 |
+
sync_files: bool = True,
|
| 344 |
+
thread_count: Optional[int] = None,
|
| 345 |
+
per_thread_copy_ahead: int = 10_000_000,
|
| 346 |
+
upload_to: Optional[str] = None,
|
| 347 |
+
save_overwrite: bool = False,
|
| 348 |
+
) -> None:
|
| 349 |
+
if thread_count is not None and thread_count <= 0:
|
| 350 |
+
raise ValueError("thread count must be at least 1")
|
| 351 |
+
super().__init__(
|
| 352 |
+
path,
|
| 353 |
+
single_file_per_rank=single_file_per_rank,
|
| 354 |
+
sync_files=sync_files,
|
| 355 |
+
# NOTE: we default to 1 thread here instead of whatever `default_thread_count()`
|
| 356 |
+
# returns because uploading big checkpoint files with multiple threads causes
|
| 357 |
+
# boto3 to fail in weird ways.
|
| 358 |
+
thread_count=thread_count or 1,
|
| 359 |
+
per_thread_copy_ahead=per_thread_copy_ahead,
|
| 360 |
+
)
|
| 361 |
+
self.upload_to = None if upload_to is None else upload_to.rstrip("/")
|
| 362 |
+
self.save_overwrite = save_overwrite
|
| 363 |
+
|
| 364 |
+
def write_data(
|
| 365 |
+
self,
|
| 366 |
+
plan: dist_cp.SavePlan,
|
| 367 |
+
planner: dist_cp.SavePlanner,
|
| 368 |
+
) -> Future[List[WriteResult]]:
|
| 369 |
+
fut = super().write_data(plan, planner)
|
| 370 |
+
if self.upload_to is not None:
|
| 371 |
+
files_to_upload = set()
|
| 372 |
+
for write_result in fut.wait():
|
| 373 |
+
files_to_upload.add(write_result.storage_data.relative_path)
|
| 374 |
+
|
| 375 |
+
# Create the global S3 client up front to work around a threading issue in boto.
|
| 376 |
+
if self.upload_to.startswith("s3://"):
|
| 377 |
+
_get_s3_client("s3")
|
| 378 |
+
elif self.upload_to.startswith("r2://"):
|
| 379 |
+
_get_s3_client("r2")
|
| 380 |
+
elif self.upload_to.startswith("weka://"):
|
| 381 |
+
_get_s3_client("weka")
|
| 382 |
+
|
| 383 |
+
with ThreadPoolExecutor(max_workers=self.thread_count) as executor:
|
| 384 |
+
futures = []
|
| 385 |
+
for fname in files_to_upload:
|
| 386 |
+
source = self.path / fname
|
| 387 |
+
target = f"{self.upload_to}/{fname}"
|
| 388 |
+
log.info(f"Uploading {source} to {target}...")
|
| 389 |
+
futures.append(executor.submit(upload, source, target, save_overwrite=self.save_overwrite))
|
| 390 |
+
for f in as_completed(futures):
|
| 391 |
+
try:
|
| 392 |
+
f.result()
|
| 393 |
+
except BaseException:
|
| 394 |
+
# NOTE: we might get an error here that can't be pickled, which causes a different failure
|
| 395 |
+
# later when PyTorch tries to reduce that error across ranks. So here we just make
|
| 396 |
+
# sure we're raising a simple error type that can be pickled.
|
| 397 |
+
raise OLMoCheckpointError(f"Original error:\n{traceback.format_exc()}")
|
| 398 |
+
return fut
|
| 399 |
+
|
| 400 |
+
def finish(self, metadata: Metadata, results: List[List[WriteResult]]) -> None:
|
| 401 |
+
super().finish(metadata, results)
|
| 402 |
+
if self.upload_to is not None:
|
| 403 |
+
source = self.path / ".metadata"
|
| 404 |
+
target = f"{self.upload_to}/.metadata"
|
| 405 |
+
log.info(f"Uploading {source} to {target}...")
|
| 406 |
+
upload(source, target, save_overwrite=self.save_overwrite)
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
class RemoteFileSystemReader(dist_cp.StorageReader):
|
| 410 |
+
"""
|
| 411 |
+
A :class:`~torch.distributed.checkpoint.StorageReader` based on :class:`~torch.distributed.checkpoint.FileSystemReader`
|
| 412 |
+
that can read data directly from cloud storage as well as a local directory.
|
| 413 |
+
"""
|
| 414 |
+
|
| 415 |
+
def __init__(
|
| 416 |
+
self, path: PathOrStr, *, local_cache: Optional[PathOrStr] = None, thread_count: Optional[int] = None
|
| 417 |
+
):
|
| 418 |
+
super().__init__()
|
| 419 |
+
if thread_count is not None and thread_count <= 0:
|
| 420 |
+
raise ValueError("thread count must be at least 1")
|
| 421 |
+
self.path = str(path).rstrip("/")
|
| 422 |
+
self.cache = None if local_cache is None else Path(local_cache)
|
| 423 |
+
self.thread_count = thread_count or default_thread_count()
|
| 424 |
+
self.storage_data: Dict[MetadataIndex, _StorageInfo] = dict()
|
| 425 |
+
self._metadata: Optional[Metadata] = None
|
| 426 |
+
|
| 427 |
+
def _get_bytes(self, relative_path: str, offset: int, length: int) -> bytes:
|
| 428 |
+
if self.cache is not None and (path := self.cache / relative_path).is_file():
|
| 429 |
+
return get_bytes_range(path, offset, length)
|
| 430 |
+
else:
|
| 431 |
+
return get_bytes_range(f"{self.path}/{relative_path}", offset, length)
|
| 432 |
+
|
| 433 |
+
def _get_content_for_read(self, read_item: ReadItem) -> Tuple[ReadItem, bytes]:
|
| 434 |
+
sinfo = self.storage_data[read_item.storage_index]
|
| 435 |
+
content = self._get_bytes(sinfo.relative_path, sinfo.offset, sinfo.length)
|
| 436 |
+
return (read_item, content)
|
| 437 |
+
|
| 438 |
+
def read_data(self, plan: dist_cp.LoadPlan, planner: dist_cp.LoadPlanner) -> Future[None]:
|
| 439 |
+
# Create the global S3 client up front to work around a threading issue in boto.
|
| 440 |
+
if isinstance(self.path, str):
|
| 441 |
+
if self.path.startswith("s3://"):
|
| 442 |
+
_get_s3_client("s3")
|
| 443 |
+
elif self.path.startswith("r2://"):
|
| 444 |
+
_get_s3_client("r2")
|
| 445 |
+
elif self.path.startswith("weka://"):
|
| 446 |
+
_get_s3_client("weka")
|
| 447 |
+
|
| 448 |
+
with ThreadPoolExecutor(max_workers=self.thread_count) as executor:
|
| 449 |
+
read_item_content_futures = []
|
| 450 |
+
for read_item in plan.items:
|
| 451 |
+
read_item_content_futures.append(executor.submit(self._get_content_for_read, read_item))
|
| 452 |
+
read_item_content_results = []
|
| 453 |
+
for f in as_completed(read_item_content_futures):
|
| 454 |
+
try:
|
| 455 |
+
read_item_content_results.append(f.result())
|
| 456 |
+
except BaseException:
|
| 457 |
+
# NOTE: we might get an error here that can't be pickled, which causes a different failure
|
| 458 |
+
# later when PyTorch tries to reduce that error across ranks. So here we just make
|
| 459 |
+
# sure we're raising a simple error type that can be pickled.
|
| 460 |
+
raise OLMoCheckpointError(f"Original error:\n{traceback.format_exc()}")
|
| 461 |
+
|
| 462 |
+
# Modified from `FileSystemReader.read_data()`
|
| 463 |
+
for read_item, content in read_item_content_results:
|
| 464 |
+
bytes = io.BytesIO(content)
|
| 465 |
+
bytes.seek(0)
|
| 466 |
+
if read_item.type == LoadItemType.BYTE_IO:
|
| 467 |
+
planner.load_bytes(read_item, bytes)
|
| 468 |
+
else:
|
| 469 |
+
tensor = cast(torch.Tensor, torch.load(bytes, map_location="cpu"))
|
| 470 |
+
tensor = narrow_tensor_by_index(tensor, read_item.storage_offsets, read_item.lengths)
|
| 471 |
+
target_tensor = planner.resolve_tensor(read_item).detach()
|
| 472 |
+
|
| 473 |
+
assert (
|
| 474 |
+
target_tensor.size() == tensor.size()
|
| 475 |
+
), f"req {read_item.storage_index} mismatch sizes {target_tensor.size()} vs {tensor.size()}"
|
| 476 |
+
target_tensor.copy_(tensor)
|
| 477 |
+
planner.commit_tensor(read_item, target_tensor)
|
| 478 |
+
|
| 479 |
+
fut: Future = Future()
|
| 480 |
+
fut.set_result(None)
|
| 481 |
+
return fut
|
| 482 |
+
|
| 483 |
+
def read_metadata(self) -> Metadata:
|
| 484 |
+
if self._metadata is None:
|
| 485 |
+
with resource_path(self.path, ".metadata", local_cache=self.cache).open("rb") as metadata_file:
|
| 486 |
+
self._metadata = pickle.load(metadata_file)
|
| 487 |
+
return self._metadata
|
| 488 |
+
|
| 489 |
+
def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None:
|
| 490 |
+
del is_coordinator
|
| 491 |
+
self.storage_data = metadata.storage_data
|
| 492 |
+
assert self.storage_data is not None
|
| 493 |
+
|
| 494 |
+
def prepare_local_plan(self, plan: dist_cp.LoadPlan) -> dist_cp.LoadPlan:
|
| 495 |
+
return plan
|
| 496 |
+
|
| 497 |
+
def prepare_global_plan(self, global_plan: List[dist_cp.LoadPlan]) -> List[dist_cp.LoadPlan]:
|
| 498 |
+
return global_plan
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
class Checkpointer(metaclass=ABCMeta):
|
| 502 |
+
def __init__(self, cfg: TrainConfig, thread_count: Optional[int] = None):
|
| 503 |
+
self.cfg = cfg
|
| 504 |
+
self.thread_count = thread_count or default_thread_count()
|
| 505 |
+
|
| 506 |
+
@abstractmethod
|
| 507 |
+
def save_checkpoint(
|
| 508 |
+
self,
|
| 509 |
+
dir: PathOrStr,
|
| 510 |
+
dist_model: nn.Module,
|
| 511 |
+
optim: Optimizer,
|
| 512 |
+
train_state: Dict[str, Any],
|
| 513 |
+
*,
|
| 514 |
+
upload_to: Optional[str] = None,
|
| 515 |
+
) -> None:
|
| 516 |
+
raise NotImplementedError
|
| 517 |
+
|
| 518 |
+
@abstractmethod
|
| 519 |
+
def restore_checkpoint(
|
| 520 |
+
self,
|
| 521 |
+
load_path: PathOrStr,
|
| 522 |
+
dist_model: nn.Module,
|
| 523 |
+
optim: Optimizer,
|
| 524 |
+
*,
|
| 525 |
+
local_cache: Optional[PathOrStr] = None,
|
| 526 |
+
load_optimizer_state: bool = True,
|
| 527 |
+
) -> Dict[str, Any]:
|
| 528 |
+
"""
|
| 529 |
+
Restores a checkpoint to the model and optimizer. Returns the remaining trainer state.
|
| 530 |
+
"""
|
| 531 |
+
raise NotImplementedError
|
| 532 |
+
|
| 533 |
+
def unshard_checkpoint(
|
| 534 |
+
self,
|
| 535 |
+
load_path: PathOrStr,
|
| 536 |
+
*,
|
| 537 |
+
local_cache: Optional[PathOrStr] = None,
|
| 538 |
+
load_optimizer_state: bool = True,
|
| 539 |
+
load_trainer_state: bool = True,
|
| 540 |
+
device: Optional[torch.device] = None,
|
| 541 |
+
) -> Tuple[Dict[str, torch.Tensor], Optional[Dict[str, Any]], Optional[Dict[str, Any]]]:
|
| 542 |
+
"""
|
| 543 |
+
Unshard a checkpoint.
|
| 544 |
+
|
| 545 |
+
Note this is not marked abstract because child classes are not required to implemented this.
|
| 546 |
+
"""
|
| 547 |
+
raise NotImplementedError
|
| 548 |
+
|
| 549 |
+
@contextmanager
|
| 550 |
+
def _temporary_wd(self, dir: PathOrStr) -> Generator[Path, None, None]:
|
| 551 |
+
# Make sure checkpoint directory doesn't exist unless it's okay to overwrite it.
|
| 552 |
+
checkpoint_dir = Path(dir)
|
| 553 |
+
if not dir_is_empty(checkpoint_dir):
|
| 554 |
+
if self.cfg.save_overwrite:
|
| 555 |
+
if get_fs_local_rank() == 0:
|
| 556 |
+
shutil.rmtree(checkpoint_dir, ignore_errors=True)
|
| 557 |
+
else:
|
| 558 |
+
raise FileExistsError(checkpoint_dir)
|
| 559 |
+
# No need to mkdir here since we'll directly replace the temporary directory with
|
| 560 |
+
# this directory below.
|
| 561 |
+
barrier()
|
| 562 |
+
|
| 563 |
+
# Prepare temporary directory. We don't have to be as careful here, we can
|
| 564 |
+
# just remove it if it already exists.
|
| 565 |
+
checkpoint_dir_tmp = checkpoint_dir.with_name(checkpoint_dir.name + "-tmp")
|
| 566 |
+
if get_fs_local_rank() == 0:
|
| 567 |
+
shutil.rmtree(checkpoint_dir_tmp, ignore_errors=True)
|
| 568 |
+
checkpoint_dir_tmp.mkdir(exist_ok=True, parents=True)
|
| 569 |
+
|
| 570 |
+
# In the cases where we're using a shared NFS drive between ranks to save checkpoints,
|
| 571 |
+
# creating the temp directory from rank 0 might not be immediately
|
| 572 |
+
# realized in the file systems of the other ranks.
|
| 573 |
+
# So we wait here across all ranks until that tmp checkpoint directory is visible.
|
| 574 |
+
wait_for(lambda: checkpoint_dir_tmp.exists(), "Waiting for checkpoint directory", timeout=10.0)
|
| 575 |
+
|
| 576 |
+
barrier()
|
| 577 |
+
|
| 578 |
+
# Yield temporary directory for `.save_checkpoint()` to use.
|
| 579 |
+
yield checkpoint_dir_tmp
|
| 580 |
+
|
| 581 |
+
barrier()
|
| 582 |
+
|
| 583 |
+
# Finally if all went well replace the temporary directory with the actual
|
| 584 |
+
# checkpoint directory.
|
| 585 |
+
if get_fs_local_rank() == 0:
|
| 586 |
+
# Replace temp directory with target checkpoint directory.
|
| 587 |
+
try:
|
| 588 |
+
checkpoint_dir_tmp.replace(checkpoint_dir)
|
| 589 |
+
except FileNotFoundError:
|
| 590 |
+
# Caught when another (file-system) local rank 0 has already replaced the tmp directory.
|
| 591 |
+
# This can happen when nodes are saving to a common NFS drive but otherwise have distinct
|
| 592 |
+
# file-systems.
|
| 593 |
+
if not checkpoint_dir.exists():
|
| 594 |
+
raise
|
| 595 |
+
|
| 596 |
+
# In the cases where we're using a shared NFS drive between ranks to save checkpoints,
|
| 597 |
+
# replacing the temp directory with the final directory from rank 0 might not be immediately
|
| 598 |
+
# realized in the file systems of the other ranks.
|
| 599 |
+
# So we wait here across all ranks until that final checkpoint directory is visible.
|
| 600 |
+
wait_for(lambda: checkpoint_dir.exists(), "Waiting for checkpoint directory", timeout=10.0)
|
| 601 |
+
|
| 602 |
+
barrier()
|
| 603 |
+
|
| 604 |
+
def _save_config(self, dir: PathOrStr, *, upload_to: Optional[str] = None) -> None:
|
| 605 |
+
if get_global_rank() == 0:
|
| 606 |
+
log.info("Saving config...")
|
| 607 |
+
self.cfg.save(config_path := Path(dir) / "config.yaml")
|
| 608 |
+
if upload_to is not None:
|
| 609 |
+
upload_target = f"{upload_to}/config.yaml"
|
| 610 |
+
log.info(f"Uploading {config_path} to {upload_target}")
|
| 611 |
+
upload(config_path, upload_target, save_overwrite=self.cfg.save_overwrite)
|
| 612 |
+
|
| 613 |
+
|
| 614 |
+
class FullCheckpointer(Checkpointer):
|
| 615 |
+
"""
|
| 616 |
+
A :class:`Checkpointer` that saves a single full model and optimizer state dictionary.
|
| 617 |
+
"""
|
| 618 |
+
|
| 619 |
+
def save_checkpoint(
|
| 620 |
+
self,
|
| 621 |
+
dir: PathOrStr,
|
| 622 |
+
dist_model: nn.Module,
|
| 623 |
+
optim: Optimizer,
|
| 624 |
+
trainer_state: Dict[str, Any],
|
| 625 |
+
*,
|
| 626 |
+
upload_to: Optional[str] = None,
|
| 627 |
+
) -> None:
|
| 628 |
+
with self._temporary_wd(dir) as checkpoint_dir:
|
| 629 |
+
if isinstance(dist_model, FSDP):
|
| 630 |
+
with FSDP.state_dict_type(
|
| 631 |
+
dist_model,
|
| 632 |
+
state_dict_type=StateDictType.FULL_STATE_DICT,
|
| 633 |
+
state_dict_config=FullStateDictConfig(rank0_only=True, offload_to_cpu=True),
|
| 634 |
+
optim_state_dict_config=FullOptimStateDictConfig(rank0_only=True, offload_to_cpu=True),
|
| 635 |
+
):
|
| 636 |
+
# We'll write the model and optimizer state dicts individually to reduce (CPU) memory consumption.
|
| 637 |
+
# First the model state.
|
| 638 |
+
model_state_dict = dist_model.state_dict()
|
| 639 |
+
self._write_model_dict(
|
| 640 |
+
model_state_dict, checkpoint_dir, upload_to, save_overwrite=self.cfg.save_overwrite
|
| 641 |
+
)
|
| 642 |
+
|
| 643 |
+
# Then the optimizer state.
|
| 644 |
+
optim_state_dict = FSDP.optim_state_dict(dist_model, optim)
|
| 645 |
+
self._write_optim_dict(
|
| 646 |
+
optim_state_dict, checkpoint_dir, upload_to, save_overwrite=self.cfg.save_overwrite
|
| 647 |
+
)
|
| 648 |
+
elif isinstance(dist_model, DDP):
|
| 649 |
+
# _write_model_dict and _write_optim_dict only write checkpoints for rank 0
|
| 650 |
+
# First, get the model state dict from DDP wrapped model
|
| 651 |
+
model_state_dict = dist_model.module.state_dict()
|
| 652 |
+
self._write_model_dict(
|
| 653 |
+
model_state_dict, checkpoint_dir, upload_to, save_overwrite=self.cfg.save_overwrite
|
| 654 |
+
)
|
| 655 |
+
|
| 656 |
+
# Then get the optimizer state dict
|
| 657 |
+
optim_state_dict = optim.state_dict()
|
| 658 |
+
self._write_optim_dict(
|
| 659 |
+
optim_state_dict, checkpoint_dir, upload_to, save_overwrite=self.cfg.save_overwrite
|
| 660 |
+
)
|
| 661 |
+
else:
|
| 662 |
+
log.info(
|
| 663 |
+
"`FullCheckpointer.save_checkpoint` only supported for FSDP and DDP distributed strategies!"
|
| 664 |
+
)
|
| 665 |
+
|
| 666 |
+
# Save trainer state.
|
| 667 |
+
if get_global_rank() == 0:
|
| 668 |
+
log.info("Saving trainer state...")
|
| 669 |
+
save_state_dict(
|
| 670 |
+
checkpoint_dir,
|
| 671 |
+
"train.pt",
|
| 672 |
+
trainer_state,
|
| 673 |
+
upload_to=upload_to,
|
| 674 |
+
save_overwrite=self.cfg.save_overwrite,
|
| 675 |
+
synchronize=False,
|
| 676 |
+
)
|
| 677 |
+
# Save config.
|
| 678 |
+
self._save_config(checkpoint_dir, upload_to=upload_to)
|
| 679 |
+
|
| 680 |
+
def restore_checkpoint(
|
| 681 |
+
self,
|
| 682 |
+
load_path: PathOrStr,
|
| 683 |
+
dist_model: nn.Module,
|
| 684 |
+
optim: Optimizer,
|
| 685 |
+
*,
|
| 686 |
+
local_cache: Optional[PathOrStr] = None,
|
| 687 |
+
load_optimizer_state: bool = True,
|
| 688 |
+
) -> Dict[str, Any]:
|
| 689 |
+
if isinstance(dist_model, FSDP):
|
| 690 |
+
with FSDP.state_dict_type(
|
| 691 |
+
dist_model,
|
| 692 |
+
state_dict_type=StateDictType.FULL_STATE_DICT,
|
| 693 |
+
state_dict_config=FullStateDictConfig(rank0_only=False, offload_to_cpu=True),
|
| 694 |
+
optim_state_dict_config=FullOptimStateDictConfig(rank0_only=False, offload_to_cpu=True),
|
| 695 |
+
):
|
| 696 |
+
with torch.no_grad():
|
| 697 |
+
# fill everything with NaN, so we can check afterwards that every parameter has been restored
|
| 698 |
+
for module_name, module in dist_model.named_modules():
|
| 699 |
+
if not isinstance(module, FSDP):
|
| 700 |
+
continue
|
| 701 |
+
for param in module.params:
|
| 702 |
+
param.fill_(torch.nan)
|
| 703 |
+
|
| 704 |
+
# restore params from checkpoint
|
| 705 |
+
state_dict_to_load = load_state_dict(
|
| 706 |
+
load_path, "model.pt", local_cache=local_cache, map_location="cpu"
|
| 707 |
+
)
|
| 708 |
+
(
|
| 709 |
+
state_dict_to_load,
|
| 710 |
+
og_keys_to_new,
|
| 711 |
+
) = dist_model._fsdp_wrapped_module._make_state_dict_compatible(state_dict_to_load)
|
| 712 |
+
|
| 713 |
+
for module_name, module in dist_model.named_modules():
|
| 714 |
+
if not isinstance(module, FSDP):
|
| 715 |
+
continue
|
| 716 |
+
for param in module.params:
|
| 717 |
+
assert param._is_flat_param
|
| 718 |
+
for fqn, spi in zip(param._fqns, param._shard_param_infos):
|
| 719 |
+
if not spi.in_shard:
|
| 720 |
+
continue
|
| 721 |
+
key = f"{module_name}.{fqn}"
|
| 722 |
+
key = key.replace("_fsdp_wrapped_module.", "")
|
| 723 |
+
key = key.lstrip(".")
|
| 724 |
+
t = state_dict_to_load[key]
|
| 725 |
+
t = t.flatten()
|
| 726 |
+
param[spi.offset_in_shard : spi.offset_in_shard + spi.numel_in_shard].copy_(
|
| 727 |
+
t[spi.intra_param_start_idx : spi.intra_param_end_idx + 1]
|
| 728 |
+
)
|
| 729 |
+
|
| 730 |
+
# make sure that every parameter has been restored
|
| 731 |
+
for module_name, module in dist_model.named_modules():
|
| 732 |
+
if not isinstance(module, FSDP):
|
| 733 |
+
continue
|
| 734 |
+
for param in module.params:
|
| 735 |
+
if torch.isnan(param).any():
|
| 736 |
+
raise ValueError(
|
| 737 |
+
f"Module '{module_name}' contains NaNs, this is likely a bug restoring from full checkpoints"
|
| 738 |
+
)
|
| 739 |
+
|
| 740 |
+
# Load optimizer state.
|
| 741 |
+
if load_optimizer_state:
|
| 742 |
+
optim_state_dict_to_load = load_state_dict(
|
| 743 |
+
load_path, "optim.pt", local_cache=local_cache, map_location="cpu"
|
| 744 |
+
)
|
| 745 |
+
optim_state_dict_to_load = self._make_optim_state_dict_compatible(
|
| 746 |
+
optim_state_dict_to_load,
|
| 747 |
+
og_keys_to_new,
|
| 748 |
+
)
|
| 749 |
+
gc.collect()
|
| 750 |
+
torch.cuda.empty_cache()
|
| 751 |
+
barrier()
|
| 752 |
+
for turn in range(get_local_world_size()):
|
| 753 |
+
log.info("Loading optimizer state turn %d ...", turn)
|
| 754 |
+
if turn == get_local_rank():
|
| 755 |
+
load_fsdp_optim_state(dist_model, optim, optim_state_dict_to_load)
|
| 756 |
+
gc.collect()
|
| 757 |
+
torch.cuda.empty_cache()
|
| 758 |
+
barrier()
|
| 759 |
+
del optim_state_dict_to_load
|
| 760 |
+
elif isinstance(dist_model, DDP):
|
| 761 |
+
# Load model state.
|
| 762 |
+
with torch.no_grad():
|
| 763 |
+
state_dict_to_load = load_state_dict(
|
| 764 |
+
load_path, "model.pt", local_cache=local_cache, map_location="cpu"
|
| 765 |
+
)
|
| 766 |
+
dist_model.module.load_state_dict(state_dict_to_load, strict=True)
|
| 767 |
+
|
| 768 |
+
# Load optimizer state.
|
| 769 |
+
if load_optimizer_state:
|
| 770 |
+
optim_state_dict_to_load = load_state_dict(
|
| 771 |
+
load_path, "optim.pt", local_cache=local_cache, map_location="cpu"
|
| 772 |
+
)
|
| 773 |
+
optim.load_state_dict(optim_state_dict_to_load)
|
| 774 |
+
|
| 775 |
+
gc.collect()
|
| 776 |
+
torch.cuda.empty_cache()
|
| 777 |
+
barrier()
|
| 778 |
+
else:
|
| 779 |
+
raise NotImplementedError(
|
| 780 |
+
"`FullCheckpointer.restore_checkpoint` only supported for FSDP and DDP distributed strategies!"
|
| 781 |
+
)
|
| 782 |
+
|
| 783 |
+
# Load other state.
|
| 784 |
+
try:
|
| 785 |
+
trainer_state = load_state_dict(load_path, "train.pt", local_cache=local_cache)
|
| 786 |
+
except FileNotFoundError:
|
| 787 |
+
# for backwards compatibility
|
| 788 |
+
trainer_state = load_state_dict(load_path, "other.pt", local_cache=local_cache)
|
| 789 |
+
barrier()
|
| 790 |
+
return trainer_state
|
| 791 |
+
|
| 792 |
+
def _write_model_dict(self, model_state_dict, checkpoint_dir, upload_to, save_overwrite):
|
| 793 |
+
if get_global_rank() == 0:
|
| 794 |
+
log.info("Saving model state...")
|
| 795 |
+
save_state_dict(
|
| 796 |
+
checkpoint_dir,
|
| 797 |
+
"model.pt",
|
| 798 |
+
model_state_dict,
|
| 799 |
+
upload_to=upload_to,
|
| 800 |
+
save_overwrite=save_overwrite,
|
| 801 |
+
synchronize=False,
|
| 802 |
+
)
|
| 803 |
+
|
| 804 |
+
del model_state_dict
|
| 805 |
+
barrier()
|
| 806 |
+
|
| 807 |
+
def _write_optim_dict(self, optim_state_dict, checkpoint_dir, upload_to, save_overwrite):
|
| 808 |
+
if get_global_rank() == 0:
|
| 809 |
+
log.info("Saving optim state...")
|
| 810 |
+
save_state_dict(
|
| 811 |
+
checkpoint_dir,
|
| 812 |
+
"optim.pt",
|
| 813 |
+
optim_state_dict,
|
| 814 |
+
upload_to=upload_to,
|
| 815 |
+
save_overwrite=save_overwrite,
|
| 816 |
+
synchronize=False,
|
| 817 |
+
)
|
| 818 |
+
|
| 819 |
+
del optim_state_dict
|
| 820 |
+
barrier()
|
| 821 |
+
|
| 822 |
+
def _make_optim_state_dict_compatible(
|
| 823 |
+
self, optim_state_dict: Dict[str, Any], og_keys_to_new: Dict[str, Set[str]]
|
| 824 |
+
) -> Dict[str, Any]:
|
| 825 |
+
# This state dict comes in two forms: one where the state keys are integers and one where the
|
| 826 |
+
# keys are fully qualified parameter names. The latter case is easier to deal with here so we
|
| 827 |
+
# first transform the integer key form into the FQN key form.
|
| 828 |
+
if isinstance(optim_state_dict["param_groups"][0]["params"][0], int):
|
| 829 |
+
id_to_fqn: Dict[int, str] = {}
|
| 830 |
+
for group in optim_state_dict["param_groups"]:
|
| 831 |
+
new_param_names = []
|
| 832 |
+
for fqn, id in zip(group["param_names"], group["params"]):
|
| 833 |
+
fqn = fqn.replace("_fsdp_wrapped_module.", "")
|
| 834 |
+
id_to_fqn[id] = fqn
|
| 835 |
+
new_param_names.append(fqn)
|
| 836 |
+
group["param_names"] = new_param_names
|
| 837 |
+
group["params"] = new_param_names
|
| 838 |
+
for id in list(optim_state_dict["state"].keys()):
|
| 839 |
+
optim_state_dict["state"][id_to_fqn[id]] = optim_state_dict["state"].pop(id)
|
| 840 |
+
else:
|
| 841 |
+
# Otherwise we still want to clean up the param names to remove the "_fsdp_wrapped_module." prefix.
|
| 842 |
+
for group in optim_state_dict["param_groups"]:
|
| 843 |
+
group["param_names"] = [fqn.replace("_fsdp_wrapped_module.", "") for fqn in group["param_names"]]
|
| 844 |
+
group["params"] = [fqn.replace("_fsdp_wrapped_module.", "") for fqn in group["params"]]
|
| 845 |
+
assert group["param_names"] == group["params"]
|
| 846 |
+
for key in list(optim_state_dict["state"].keys()):
|
| 847 |
+
optim_state_dict["state"][key.replace("_fsdp_wrapped_module.", "")] = optim_state_dict[
|
| 848 |
+
"state"
|
| 849 |
+
].pop(key)
|
| 850 |
+
|
| 851 |
+
# Now we can transform the state dict by renaming parameters according to `og_keys_to_new`.
|
| 852 |
+
# First fix param names in the state.
|
| 853 |
+
for og_key, new_keys in og_keys_to_new.items():
|
| 854 |
+
og_state = optim_state_dict["state"].pop(og_key, None)
|
| 855 |
+
if og_state is None:
|
| 856 |
+
continue
|
| 857 |
+
for i, new_key in enumerate(new_keys):
|
| 858 |
+
if i == len(new_keys) - 1:
|
| 859 |
+
optim_state_dict["state"][new_key] = og_state
|
| 860 |
+
else:
|
| 861 |
+
optim_state_dict["state"][new_key] = deepcopy(og_state)
|
| 862 |
+
# Now fix param names in the param groups.
|
| 863 |
+
for group in optim_state_dict["param_groups"]:
|
| 864 |
+
og_names = group["params"]
|
| 865 |
+
new_names = []
|
| 866 |
+
for og_key in og_names:
|
| 867 |
+
for new_key in og_keys_to_new[og_key]:
|
| 868 |
+
new_names.append(new_key)
|
| 869 |
+
group["params"] = new_names
|
| 870 |
+
group["param_names"] = new_names
|
| 871 |
+
|
| 872 |
+
return optim_state_dict
|
| 873 |
+
|
| 874 |
+
def load_checkpoint(
|
| 875 |
+
self,
|
| 876 |
+
load_path: PathOrStr,
|
| 877 |
+
*,
|
| 878 |
+
local_cache: Optional[PathOrStr] = None,
|
| 879 |
+
load_optimizer_state: bool = True,
|
| 880 |
+
device: Optional[torch.device] = None,
|
| 881 |
+
) -> Tuple[Dict[str, torch.Tensor], Optional[Dict[str, Any]]]:
|
| 882 |
+
device = device if device is not None else torch.device("cpu")
|
| 883 |
+
model_state = load_state_dict(load_path, "model.pt", local_cache=local_cache, map_location=device) # type: ignore
|
| 884 |
+
optim_state = None
|
| 885 |
+
if load_optimizer_state:
|
| 886 |
+
optim_state = load_state_dict(load_path, "optim.pt", local_cache=local_cache, map_location=device) # type: ignore
|
| 887 |
+
return model_state, optim_state
|
| 888 |
+
|
| 889 |
+
|
| 890 |
+
class TorchNewStyleShardedCheckpointer(Checkpointer):
|
| 891 |
+
"""
|
| 892 |
+
A sharded :class:`Checkpointer` that uses PyTorch's new distributed checkpointing functionality.
|
| 893 |
+
"""
|
| 894 |
+
|
| 895 |
+
def save_checkpoint(
|
| 896 |
+
self,
|
| 897 |
+
dir: PathOrStr,
|
| 898 |
+
dist_model: nn.Module,
|
| 899 |
+
optim: Optimizer,
|
| 900 |
+
trainer_state: Dict[str, Any],
|
| 901 |
+
*,
|
| 902 |
+
upload_to: Optional[str] = None,
|
| 903 |
+
) -> None:
|
| 904 |
+
assert isinstance(
|
| 905 |
+
dist_model, FSDP
|
| 906 |
+
), f"{self.__class__.__name__} is being called to save a model where `distributed_strategy` is not FSDP."
|
| 907 |
+
with self._temporary_wd(dir) as checkpoint_dir:
|
| 908 |
+
# Save model and optim state.
|
| 909 |
+
save_fsdp_model_and_optim_state(
|
| 910 |
+
checkpoint_dir,
|
| 911 |
+
dist_model,
|
| 912 |
+
optim,
|
| 913 |
+
upload_to=upload_to,
|
| 914 |
+
save_overwrite=self.cfg.save_overwrite,
|
| 915 |
+
)
|
| 916 |
+
|
| 917 |
+
# Save trainer state.
|
| 918 |
+
log.info("Saving trainer state...")
|
| 919 |
+
save_state_dict(
|
| 920 |
+
checkpoint_dir,
|
| 921 |
+
f"train/rank{get_global_rank()}.pt",
|
| 922 |
+
trainer_state,
|
| 923 |
+
upload_to=upload_to,
|
| 924 |
+
save_overwrite=self.cfg.save_overwrite,
|
| 925 |
+
)
|
| 926 |
+
|
| 927 |
+
# Save config.
|
| 928 |
+
self._save_config(checkpoint_dir, upload_to=upload_to)
|
| 929 |
+
|
| 930 |
+
def restore_checkpoint(
|
| 931 |
+
self,
|
| 932 |
+
load_path: PathOrStr,
|
| 933 |
+
dist_model: nn.Module,
|
| 934 |
+
optim: Optimizer,
|
| 935 |
+
*,
|
| 936 |
+
local_cache: Optional[PathOrStr] = None,
|
| 937 |
+
load_optimizer_state: bool = True,
|
| 938 |
+
) -> Dict[str, Any]:
|
| 939 |
+
# Load model and optimizer state in place.
|
| 940 |
+
log.info("Loading model and optimizer state...")
|
| 941 |
+
assert isinstance(
|
| 942 |
+
dist_model, FSDP
|
| 943 |
+
), f"{self.__class__.__name__} is being called to load a model where `distributed_strategy` is not FSDP."
|
| 944 |
+
|
| 945 |
+
load_fsdp_model_and_optim_state(
|
| 946 |
+
load_path,
|
| 947 |
+
dist_model,
|
| 948 |
+
optim,
|
| 949 |
+
local_cache=local_cache,
|
| 950 |
+
load_optimizer_state=load_optimizer_state,
|
| 951 |
+
)
|
| 952 |
+
|
| 953 |
+
# Load trainer state dict.
|
| 954 |
+
log.info("Loading trainer state...")
|
| 955 |
+
try:
|
| 956 |
+
trainer_state = load_state_dict(
|
| 957 |
+
load_path, f"train/rank{get_global_rank()}.pt", local_cache=local_cache
|
| 958 |
+
)
|
| 959 |
+
except FileNotFoundError:
|
| 960 |
+
# Fall back to rank 0 train state.
|
| 961 |
+
# This can happen when we're restoring a checkpoint with a different world size.
|
| 962 |
+
trainer_state = load_state_dict(load_path, "train/rank0.pt", local_cache=local_cache)
|
| 963 |
+
barrier()
|
| 964 |
+
return trainer_state
|
| 965 |
+
|
| 966 |
+
|
| 967 |
+
class TorchLegacyShardedCheckpointer(Checkpointer):
    """
    A sharded :class:`Checkpointer` that just uses `torch.save()` with extra logic for handling FSDP model
    and optim state.

    The world size must be kept consistent when using this checkpointer.
    """

    def __init__(self, cfg: TrainConfig, thread_count: Optional[int] = None, use_shared_mem_impl: bool = False):
        super().__init__(cfg, thread_count)
        self.use_shared_mem_impl = use_shared_mem_impl

    def save_checkpoint(
        self,
        dir: PathOrStr,
        dist_model: nn.Module,
        optim: Optimizer,
        trainer_state: Dict[str, Any],
        *,
        upload_to: Optional[str] = None,
    ) -> None:
        assert isinstance(
            dist_model, FSDP
        ), f"{self.__class__.__name__} is being called to save a model where `distributed_strategy` is not FSDP."
        with self._temporary_wd(dir) as checkpoint_dir:
            with FSDP.state_dict_type(
                dist_model,
                state_dict_type=StateDictType.SHARDED_STATE_DICT,
                state_dict_config=ShardedStateDictConfig(offload_to_cpu=True),
                optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True),
            ):
                state_dict = {
                    "model": dist_model.state_dict(),
                    "optim": FSDP.optim_state_dict(dist_model, optim),
                    **trainer_state,
                }
                save_state_dict(
                    checkpoint_dir,
                    f"rank{get_global_rank()}.pt",
                    state_dict,
                    upload_to=upload_to,
                    save_overwrite=self.cfg.save_overwrite,
                )

            # Save config.
            self._save_config(checkpoint_dir, upload_to=upload_to)

    def restore_checkpoint(
        self,
        load_path: PathOrStr,
        dist_model: nn.Module,
        optim: Optimizer,
        *,
        local_cache: Optional[PathOrStr] = None,
        load_optimizer_state: bool = True,
    ) -> Dict[str, Any]:
        assert isinstance(
            dist_model, FSDP
        ), f"{self.__class__.__name__} is being called to load a model where `distributed_strategy` is not FSDP."
        with FSDP.state_dict_type(
            dist_model,
            state_dict_type=StateDictType.SHARDED_STATE_DICT,
            state_dict_config=ShardedStateDictConfig(offload_to_cpu=True),
            optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True),
        ):
            # Deserialize state dict.
            state_dict = load_state_dict(
                load_path, f"rank{get_global_rank()}.pt", local_cache=local_cache, map_location="cpu"
            )

            # Load model and optimizer state.
            log.info("Loading model state...")
            dist_model.load_state_dict(state_dict["model"])
            del state_dict["model"]
            if load_optimizer_state:
                log.info("Loading optimizer state...")
                load_fsdp_optim_state(dist_model, optim, state_dict["optim"])
            del state_dict["optim"]

        barrier()
        return state_dict

    def unshard_checkpoint(
        self,
        load_path: PathOrStr,
        *,
        local_cache: Optional[PathOrStr] = None,
        load_optimizer_state: bool = True,
        load_trainer_state: bool = True,
        device: Optional[torch.device] = None,
    ) -> Tuple[Dict[str, torch.Tensor], Optional[Dict[str, Any]], Optional[Dict[str, Any]]]:
        assert local_cache is None, "this method currently only supports local files"
        full_state_dict = self._unshard(load_path, device or torch.device("cpu"), skip_keys={"rng"})
        model_state = full_state_dict.pop("model")
        optim_state = full_state_dict.pop("optim")
        return (
            model_state,
            optim_state if load_optimizer_state else None,
            full_state_dict if load_trainer_state else None,
        )

    def _copy_sharded_tensors_to_shared_mem(self, state: Dict, world_size: int, rank: int, key: Tuple):
        key = tuple() if key is None else key
        if isinstance(state, (list, tuple, set)):
            for i, sub_state in enumerate(state):
                self._copy_sharded_tensors_to_shared_mem(sub_state, world_size, rank, key + (i,))
        elif isinstance(state, dict):
            for name in state.keys():
                self._copy_sharded_tensors_to_shared_mem(state[name], world_size, rank, key + (name,))
        elif isinstance(state, ShardedTensor):
            self._copy_sharded_tensor_to_shared_mem(state, world_size, rank, key)
            return
        else:
            return

    def _get_shard_placement_and_rank_sizes(
        self, shards_metadata: List[ShardMetadata], world_size: int
    ) -> Tuple[Dict[ShardMetadata, Tuple[int, int]], List[int]]:
        def shard_size(shard_md):
            return reduce((lambda x, y: x * y), shard_md.shard_sizes)  # type: ignore[attr-defined]

        rank_sizes = [0 for _ in range(world_size)]
        shard_placement: Dict[ShardMetadata, Tuple[int, int]] = {}
        for shard_md in shards_metadata:
            shard_rank = cast(_remote_device, shard_md.placement).rank()
            assert shard_rank is not None
            if shard_rank >= world_size:
                raise RuntimeError(f"Shard rank {shard_rank} exceeds world size {world_size}")

            shard_placement[shard_md] = (shard_rank, rank_sizes[shard_rank])
            rank_sizes[shard_rank] += shard_size(shard_md)

        return shard_placement, rank_sizes

    def _copy_sharded_tensor_to_shared_mem(
        self, sharded_tensor: ShardedTensor, world_size: int, rank: int, key: Tuple
    ) -> Any:
        shard0_md = sharded_tensor.metadata()
        shard_placement, rank_sizes = self._get_shard_placement_and_rank_sizes(
            shard0_md.shards_metadata, world_size
        )

        rank_size = rank_sizes[rank]
        assert rank_size >= 0
        if rank_size == 0:
            return

        assert shard0_md.tensor_properties.dtype == torch.float32, "Expected sharded tensor to be fp32"
        numpy_type = np.float32

        sharded_memory_name = "-".join(key + (str(rank),))

        shm = shared_memory.SharedMemory(
            create=True, size=rank_size * np.dtype(numpy_type).itemsize, name=sharded_memory_name
        )
        np_arr = np.ndarray((rank_size,), dtype=numpy_type, buffer=shm.buf)

        for local_shard in sharded_tensor.local_shards():
            shard_rank = cast(_remote_device, local_shard.metadata.placement).rank()
            assert shard_rank == rank

            src = local_shard.tensor.flatten()
            shard_offset = shard_placement[local_shard.metadata][1]

            np_arr[shard_offset : shard_offset + src.numel()] = src.numpy()

        shm.close()

    def _copy_sharded_data_to_shared_mem(self, world_size: int, shard_filepath: Path):
        shard_number = int(shard_filepath.name[4:-3])
        log.info("Starting unsharding shard number %d to shared memory", shard_number)

        with self._patch_sharded_tensor_load():
            shard = torch.load(shard_filepath, map_location="cpu")
            log.debug("Done loading shard number %d", shard_number)

            self._copy_sharded_tensors_to_shared_mem(
                shard, world_size, shard_number, (str(shard_filepath.parent).replace("/", "_"),)
            )
            log.info("Done unsharding shard number %d to shared memory", shard_number)

    def _unshard_using_sharded_mem(
        self, state: Any, world_size: int, device: torch.device, shard_dir: PathOrStr
    ) -> Any:
        return self._unshard_state_using_shared_mem(state, world_size, device, (str(shard_dir).replace("/", "_"),))

    def _unshard_state_using_shared_mem(
        self, state: Any, world_size: int, device: torch.device, key: Tuple
    ) -> Any:
        if isinstance(state, (list, tuple, set)):
            return state.__class__(
                self._unshard_state_using_shared_mem(sub_state, world_size, device, key + (i,))
                for i, sub_state in enumerate(state)
            )
        elif isinstance(state, dict):
            return {
                name: self._unshard_state_using_shared_mem(state[name], world_size, device, key + (name,))
                for name in state.keys()
            }
        elif isinstance(state, ShardedTensor):
            return self._unshard_tensor_using_shared_mem(state, world_size, device, key)
        elif isinstance(state, torch.Tensor):
            return state.to(device=device)
        else:
            return state

    def _unshard_tensor_using_shared_mem(
        self, sharded_tensor: ShardedTensor, world_size: int, device: torch.device, key: Tuple
    ) -> torch.Tensor:
        shard0_md = sharded_tensor.metadata()

        def shard_size(shard_md):
            return reduce((lambda x, y: x * y), shard_md.shard_sizes)  # type: ignore[attr-defined]

        shard_placement, rank_sizes = self._get_shard_placement_and_rank_sizes(
            shard0_md.shards_metadata, world_size
        )

        assert shard0_md.tensor_properties.dtype == torch.float32, "Expected sharded tensor to be fp32"
        numpy_type = np.float32

        out = torch.empty(
            *sharded_tensor.metadata().size, dtype=sharded_tensor.metadata().tensor_properties.dtype, device=device
        )
        dims = len(sharded_tensor.metadata().size)
        for shard_md, (rank, rank_offset) in shard_placement.items():
            if rank >= world_size:
                raise RuntimeError(f"Shard rank {rank} exceeds world size {world_size}")

            sharded_memory_name = "-".join(key + (str(rank),))
            shm = shared_memory.SharedMemory(name=sharded_memory_name)

            rank_size = rank_sizes[rank]
            assert rank_size >= 0
            if rank_size == 0:
                continue

            np_arr = np.ndarray((rank_size,), dtype=numpy_type, buffer=shm.buf)

            tensor = torch.from_numpy(np_arr)[rank_offset : rank_offset + shard_size(shard_md)]
            tensor = tensor.view(shard_md.shard_sizes)

            out_narrow_view = out
            for dim in range(dims):
                out_narrow_view = out_narrow_view.narrow(
                    dim,
                    shard_md.shard_offsets[dim],
                    shard_md.shard_sizes[dim],
                )

            out_narrow_view.copy_(tensor)

            shm.close()
            shm.unlink()

        return out

    @contextmanager
    def _patch_sharded_tensor_load(self):
        """
        Monkeypatch for torch's ShardedTensor, so we can unpickle without having torch.distributed set up.
        """

        def _rebuild_from_type_v2_monkey(func, new_type, args, state):
            ret = func(*args)
            if type(ret) is not new_type:
                ret = ret.as_subclass(new_type)

            # Shortcut the construction of ShardedTensor
            # This is in the top 5 of my worst hacks.
            if isinstance(ret, ShardedTensor):
                ret._local_shards, ret._metadata, _, ret._sharding_spec, ret._init_rrefs = state
                return ret

            # The rest of this function ought to be in the top 5 of somebody else's worst hacks.
            # Tensor does define __setstate__ even though it doesn't define
            # __getstate__. So only use __setstate__ if it is NOT the one defined
            # on Tensor
            if getattr(ret.__class__, "__setstate__", torch.Tensor.__setstate__) is not torch.Tensor.__setstate__:
                ret.__setstate__(state)
            else:
                ret = torch._utils._set_obj_state(ret, state)
            return ret

        original_rebuild_from_type_v2 = torch._tensor._rebuild_from_type_v2
        try:
            torch._tensor._rebuild_from_type_v2 = _rebuild_from_type_v2_monkey
            yield
        finally:
            torch._tensor._rebuild_from_type_v2 = original_rebuild_from_type_v2

    def _unshard_using_shared_memory(
        self, input_dir: PathOrStr, device: torch.device, skip_keys: Optional[Set[str]] = None
    ):
        """
        This unsharding implementation consists of:

        1. Loading each shard on a separate process and copying their sharded tensors to shared memory.
        2. Loading 1 shard on the main process as a base unsharded object.
        3. Using the sharded tensors in shared memory to populate the base unsharded object.

        This implementation is an alternative to a prior implementation that instead loaded
        all shards using threads, because that implementation turned out to
        be extremely slow (e.g. 6+ hours) sometimes when the world size was 1024.
        The current implementation is slower than the old one in many scenarios,
        but is significantly faster in the above mentioned case (e.g. 30 minutes)
        if there are enough CPUs.

        We keep the other implementation since this one can be more unreliable,
        likely due to its dependence on a large amount of shared memory.
        """

        input_dir = Path(input_dir)
        skip_keys = skip_keys or set()

        shard_filepaths = list(input_dir.glob("rank*.pt"))
        world_size = len(shard_filepaths)
        if world_size == 0:
            raise RuntimeError("No shards found for unsharding")

        log.info("Number of shards: %d", world_size)
        shard_size_gb = shard_filepaths[0].stat().st_size / (1024 * 1024 * 1024)
        min_ram_required_estimate_gb = shard_size_gb * world_size
        log.info(
            "Shards are %.2fGB each, at least %.2fGB RAM is required", shard_size_gb, min_ram_required_estimate_gb
        )

        log.info("Copying sharded tensors to shared memory using multiple processes")
        # Copy sharded data to shared memory using multiple processes, so this process can load
        # from memory rather than disk. We spawn a new process instead of forking since shared memory
        # appears to get deleted when forked processes end for some reason.
        executor = ProcessPoolExecutor(
            mp_context=mp.get_context("spawn"), initializer=util.prepare_cli_environment
        )
        futures = []
        for shard_filepath in shard_filepaths:
            shard_rank = int(shard_filepath.name[4:-3])

            if shard_rank >= world_size:
                raise RuntimeError(
                    f"Shard rank {shard_rank} of file {shard_filepath} exceeds world size {world_size}"
                )

            futures.append(executor.submit(self._copy_sharded_data_to_shared_mem, world_size, shard_filepath))

        for f in as_completed(futures):
            f.result()
        executor.shutdown()

        log.info("Loading a shard on the main process to be unsharded state")
        with self._patch_sharded_tensor_load():
            state = torch.load(shard_filepaths[0], map_location="cpu")

        for key in skip_keys:
            if key in state:
                del state[key]

        log.info("Unsharding from %d shards ...", world_size)
        return self._unshard_using_sharded_mem(state, world_size, device, input_dir)

    def _unshard(self, input_dir: PathOrStr, device: torch.device, skip_keys: Optional[Set[str]] = None):
        if self.use_shared_mem_impl:
            return self._unshard_using_shared_memory(input_dir, device, skip_keys)

        input_dir = Path(input_dir)
        skip_keys = skip_keys or set()

        with self._patch_sharded_tensor_load():
            # We load in threads because it's faster.
            executor = ThreadPoolExecutor()
            shards_dict = {}
            for shard_name in input_dir.glob("rank*.pt"):
                log.info("Loading %s ...", shard_name)
                shard_number = int(shard_name.name[4:-3])  # shard names look like "rankXX.pt"
                shards_dict[shard_number] = executor.submit(torch.load, shard_name, map_location="cpu")
            shards = [None] * len(shards_dict)
            for rank, shard_future in shards_dict.items():
                shard = shard_future.result()
                for key in skip_keys:
                    if key in shard:
                        del shard[key]
                shards[rank] = shard
            assert all(shard is not None for shard in shards)
            executor.shutdown()
            del shards_dict

        log.info("Unsharding from %d shards ...", len(shards))

        unsharded_state_dict = self._unshard_object(shards, device=device)
        # At this point in time we need 2x memory :-(
        del shards

        return unsharded_state_dict

    def _unshard_object(self, os: List[Any], device: torch.device) -> Any:
        rank0_item = os[0]
        assert all(type(o) is type(rank0_item) for o in os)
        if isinstance(rank0_item, str):
            assert all(o == rank0_item for o in os)
            return rank0_item
        elif isinstance(rank0_item, (list, tuple, set)):
            assert all(len(o) == len(rank0_item) for o in os)
            return rank0_item.__class__(self._unshard_object(o, device=device) for o in zip(*os))
        elif isinstance(rank0_item, dict):
            assert all(o.keys() == rank0_item.keys() for o in os)
            return {key: self._unshard_object([o[key] for o in os], device=device) for key in rank0_item.keys()}
        elif isinstance(rank0_item, ShardedTensor):
            return self._gather(os, device=device)
        else:
            assert all(self._objects_are_equal(o, rank0_item) for o in os)
            return rank0_item

    def _gather(self, shards: List[ShardedTensor], device: torch.device) -> torch.Tensor:
        world_size = len(shards)
        shard0_md = shards[0].metadata()
        # Make sure all shards agree on the metadata
        assert all(shard.metadata() == shard0_md for shard in shards)
        # Make sure the nth shard expects to be the nth shard.
        assert all(
            shard_md.placement.rank() == rank  # type: ignore
            for rank, shard_md in enumerate(shard0_md.shards_metadata)
        )

        def shard_size(shard_md):
            return reduce((lambda x, y: x * y), shard_md.shard_sizes)  # type: ignore[attr-defined]

        rank_sizes = [0 for _ in range(world_size)]
        max_rank_size = 0
        shard_placement: Dict[ShardMetadata, Tuple[int, int]] = {}
        for shard_md in shard0_md.shards_metadata:
            shard_rank = cast(_remote_device, shard_md.placement).rank()
            assert shard_rank is not None

            shard_placement[shard_md] = (shard_rank, rank_sizes[shard_rank])
            rank_sizes[shard_rank] += shard_size(shard_md)
            max_rank_size = max(max_rank_size, rank_sizes[shard_rank])

        gather_list: List[torch.Tensor] = [torch.empty((max_rank_size,)) for _ in range(world_size)]

        datas = []
        with torch.no_grad():
            for shard in shards:
                data = torch.empty(max_rank_size)

                for local_shard in shard.local_shards():
                    src = local_shard.tensor.flatten()
                    shard_offset = shard_placement[local_shard.metadata][1]
                    data[shard_offset : shard_offset + src.numel()].copy_(src)

                datas.append(data)

        # torch.gather in a nutshell
        for rank, data in enumerate(datas):
            gather_list[rank].copy_(data)

        full_size = shard0_md.size
        out = torch.empty(*full_size, dtype=shard0_md.tensor_properties.dtype, device=device)
        dims = len(full_size)
        for shard_md in shard0_md.shards_metadata:
            rank, rank_offset = shard_placement[shard_md]
            tensor = gather_list[rank]
            tensor = tensor[rank_offset : rank_offset + shard_size(shard_md)]
            tensor = tensor.view(shard_md.shard_sizes)

            out_narrow_view = out
            for dim in range(dims):
                out_narrow_view = out_narrow_view.narrow(
                    dim,
                    shard_md.shard_offsets[dim],
                    shard_md.shard_sizes[dim],
                )

            out_narrow_view.copy_(tensor)

        return out

    def _objects_are_equal(self, a: Any, b: Any) -> bool:
        if type(a) is not type(b):
            return False
        if isinstance(a, np.ndarray):
            return np.array_equal(a, b)
        elif isinstance(a, torch.Tensor):
            return torch.equal(a, b)
        else:
            return a == b
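
# Offline unsharding sketch (hypothetical, assuming a directory of rank*.pt shards written by
# the legacy checkpointer above): `unshard_checkpoint()` runs in a single process, so a
# conversion script could look roughly like this (paths are placeholders):
#
#   checkpointer = TorchLegacyShardedCheckpointer(cfg, use_shared_mem_impl=False)
#   model_state, optim_state, trainer_state = checkpointer.unshard_checkpoint("checkpoints/step1000")
#   torch.save(model_state, "unsharded/model.pt")
#   if optim_state is not None:
#       torch.save(optim_state, "unsharded/optim.pt")

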
@dataclass
class _LocalShardedCheckpointerMetadata(BaseConfig):
    world_size: int = field(default_factory=get_world_size)


@dataclass
class _FlatParamShard:
    full_shape: torch.Size
    shard_offsets: Tuple[int, int]
    shard_data: Optional[torch.Tensor]

    def copy_into(self, full_tensor: torch.Tensor) -> None:
        assert self.shard_data is not None
        full_tensor_shard_view = full_tensor.view(-1)[self.shard_offsets[0] : self.shard_offsets[1] + 1]
        assert self.shard_data.shape == full_tensor_shard_view.shape
        full_tensor_shard_view.copy_(self.shard_data)
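
# Illustration of `_FlatParamShard.copy_into()` (hypothetical values): `shard_offsets` are
# *inclusive* indices into the flattened full parameter, so a shard covering offsets (4, 11)
# holds 8 elements and fills `full.view(-1)[4:12]`:
#
#   full = torch.zeros(4, 4)
#   shard = _FlatParamShard(
#       full_shape=torch.Size([4, 4]),
#       shard_offsets=(4, 11),
#       shard_data=torch.arange(8, dtype=torch.float32),
#   )
#   shard.copy_into(full)  # full.view(-1)[4:12] now holds 0.0 .. 7.0

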
class LocalShardedCheckpointer(Checkpointer):
    """
    A sharded :class:`Checkpointer` that directly saves the local FSDP flat params data.
    The optimizer state is saved directly with `torch.save()` without reformatting via FSDP methods.

    The world size must be kept consistent when using this checkpointer. However, you can easily
    reconstruct a full unsharded model and/or optimizer state dictionary from a single Python process
    using :meth:`unshard_checkpoint()` (no distributed initialization required).
    """

    # These correspond to metadata attributes on `torch.distributed.fsdp.flat_param.FlatParameter`.
    _FLAT_PARAM_METADATA_TO_SAVE = (
        "_fqns",
        "_shard_param_offsets",
        "_shard_indices",
        "_numels",
        "_numels_with_padding",
        "_shapes",
        "_shard_numel_padded",
        "_shard_param_infos",
    )

    def _fsdp_modules(self, fsdp_model: FSDP) -> List[Tuple[str, FSDP]]:
        """
        Returns a list of FSDP modules with their FQN.
        """
        modules = []
        for name, module in fsdp_model.named_modules():
            if isinstance(module, FSDP):
                modules.append((name, module))
        return modules

    def _prepare_fsdp_model(self, fsdp_model: FSDP) -> None:
        from torch.distributed.fsdp._runtime_utils import _lazy_init

        # TODO (epwalsh): I'm not sure if this is necessary, but this is what PyTorch does before saving/loading
        # an FSDP state dict through the built-in methods.
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        _lazy_init(fsdp_model, fsdp_model)

    def _fsdp_handles(self, fsdp_model: FSDP) -> List[FlatParamHandle]:
        if version.parse(torch.__version__) < version.parse("2.1.0"):
            return fsdp_model._handles  # type: ignore
        elif version.parse(torch.__version__) < version.parse("2.3.0"):
            # Handle could be None if the FSDP wrapper doesn't manage any parameters.
            if hasattr(fsdp_model, "_handle") and fsdp_model._handle is not None:
                return [fsdp_model._handle]  # type: ignore
            else:
                return []
        else:
            # Need to verify FSDP internals with newer versions.
            raise NotImplementedError

    @torch.no_grad()
    def _get_flat_param_state_to_save(self, fsdp_model: FSDP) -> Dict[str, Any]:
        self._prepare_fsdp_model(fsdp_model)
        module_data = []
        for module_fqn, fsdp_module in self._fsdp_modules(fsdp_model):
            handle_data = []
            for handle in self._fsdp_handles(fsdp_module):
                data: Dict[str, Any] = {}
                # This is a `FlatParameter` instance.
                # See `torch.distributed.fsdp.flat_param` for the API.
                flat_param = handle.flat_param
                data["flat_param.data"] = flat_param.detach()
                for key in self._FLAT_PARAM_METADATA_TO_SAVE:
                    if hasattr(flat_param, key):
                        data[f"flat_param.{key}"] = getattr(flat_param, key)
                handle_data.append(data)
            module_data.append({"handles": handle_data, "name": module_fqn})
        return {"modules": module_data}

    @torch.no_grad()
    def _load_flat_param_state(self, fsdp_model: FSDP, model_state: Dict[str, Any]):
        """Load the state produced from `self._get_flat_param_state_to_save()`."""
        self._prepare_fsdp_model(fsdp_model)
        fsdp_modules = self._fsdp_modules(fsdp_model)
        assert len(model_state["modules"]) == len(fsdp_modules)
        for (_, fsdp_module), module_data in zip(fsdp_modules, model_state["modules"]):
            handles = self._fsdp_handles(fsdp_module)
            assert len(handles) == len(module_data["handles"])
            for handle, data in zip(handles, module_data["handles"]):
                flat_param = handle.flat_param
                # Make sure metadata matches.
                for key in self._FLAT_PARAM_METADATA_TO_SAVE:
                    if hasattr(flat_param, key):
                        assert getattr(flat_param, key) == data[f"flat_param.{key}"]
                # Load the flat sharded data.
                flat_param.copy_(data["flat_param.data"])

    def _save_metadata(self, dir: PathOrStr, *, upload_to: Optional[str] = None) -> None:
        if get_fs_local_rank() == 0:
            log.info("Saving metadata...")
            metadata = _LocalShardedCheckpointerMetadata()
            metadata.save(metadata_path := Path(dir) / "metadata.yaml")
            if upload_to is not None and get_global_rank() == 0:
                upload_target = f"{upload_to}/metadata.yaml"
                log.info(f"Uploading {metadata_path} to {upload_target}")
                upload(metadata_path, upload_target, save_overwrite=self.cfg.save_overwrite)

    def _load_metadata(
        self, load_path: PathOrStr, *, local_cache: Optional[PathOrStr] = None
    ) -> _LocalShardedCheckpointerMetadata:
        metadata_path = resource_path(load_path, "metadata.yaml", local_cache=local_cache)
        return _LocalShardedCheckpointerMetadata.load(metadata_path)

    def save_checkpoint(
        self,
        dir: PathOrStr,
        dist_model: nn.Module,
        optim: Optimizer,
        trainer_state: Dict[str, Any],
        *,
        upload_to: Optional[str] = None,
    ) -> None:
        assert isinstance(
            dist_model, FSDP
        ), f"{self.__class__.__name__} is being called to save a model where `distributed_strategy` is not FSDP."

        with self._temporary_wd(dir) as checkpoint_dir:
            # Gather local FSDP flat params data to save.
            # We also save some flat param metadata like the corresponding fully qualified names (fqns)
            # of each original parameter so we can validate that the sharding is the same when loading
            # one of these checkpoints.
            log.info("Saving local FSDP flat params data...")
            save_state_dict(
                checkpoint_dir,
                f"model/rank{get_global_rank()}.pt",
                self._get_flat_param_state_to_save(dist_model),
                upload_to=upload_to,
                save_overwrite=self.cfg.save_overwrite,
            )

            # Save optimizer state.
            log.info("Saving local optimizer state...")
            save_state_dict(
                checkpoint_dir,
                f"optim/rank{get_global_rank()}.pt",
                optim.state_dict(),
                upload_to=upload_to,
                save_overwrite=self.cfg.save_overwrite,
            )

            # Save trainer state.
            log.info("Saving trainer state...")
            save_state_dict(
                checkpoint_dir,
                f"train/rank{get_global_rank()}.pt",
                trainer_state,
                upload_to=upload_to,
                save_overwrite=self.cfg.save_overwrite,
            )

            # Save metadata.
            self._save_metadata(checkpoint_dir, upload_to=upload_to)

            # Save config. We do this last b/c the presence of a config in a remote checkpoint
            # "directory" indicates that the folder is valid, as opposed to a partially
            # uploaded checkpoint directory that failed before completing.
            self._save_config(checkpoint_dir, upload_to=upload_to)

    def restore_checkpoint(
        self,
        load_path: PathOrStr,
        dist_model: nn.Module,
        optim: Optimizer,
        *,
        local_cache: Optional[PathOrStr] = None,
        load_optimizer_state: bool = True,
    ) -> Dict[str, Any]:
        # Load metadata and make sure checkpoint is compatible.
        metadata = self._load_metadata(load_path, local_cache=local_cache)
        assert metadata.world_size == get_world_size()

        # Load local FSDP flat param data.
        log.info("Loading local FSDP flat params data...")
        assert isinstance(
            dist_model, FSDP
        ), f"{self.__class__.__name__} is being called to load a model where `distributed_strategy` is not FSDP."

        model_state = load_state_dict(
            load_path, f"model/rank{get_global_rank()}.pt", local_cache=local_cache, map_location="cpu"
        )
        self._load_flat_param_state(dist_model, model_state)
        del model_state

        # Load local optim state.
        if load_optimizer_state:
            log.info("Loading local optimizer state...")
            optim_state = load_state_dict(
                load_path, f"optim/rank{get_global_rank()}.pt", local_cache=local_cache, map_location="cpu"
            )
            # HACK/TODO (epwalsh): When we use adaptive clipping we track the 'grad_norm_exp_avg' for every param
            # in every rank, and keep this in the optimizer state. But this causes issues when loading the
            # state since torch sees the state is non-empty for some params which would normally be empty,
            # and then assumes it should have all of the other state tensors for that param, which it doesn't.
            # So for now we just remove 'grad_norm_exp_avg' everywhere from the state, which resets that metric.
            # Not the end of the world but there's probably a better way around this without resetting
            # the metric.
            for param_id in list(optim_state["state"].keys()):
                state = optim_state["state"][param_id]
                if "grad_norm_exp_avg" in state:
                    del state["grad_norm_exp_avg"]
                if len(state) == 0:
                    del optim_state["state"][param_id]
            optim.load_state_dict(optim_state)
            del optim_state

        # Load local trainer state.
        log.info("Loading local trainer state...")
        trainer_state = load_state_dict(load_path, f"train/rank{get_global_rank()}.pt", local_cache=local_cache)
        barrier()
        return trainer_state

    def _iter_flat_param_shards(
        self, model_state: Dict[str, Any]
    ) -> Generator[Tuple[str, _FlatParamShard], None, None]:
        for module_data in model_state["modules"]:
            module_prefix = module_data["name"].replace("_fsdp_wrapped_module.", "")
            for handle in module_data["handles"]:
                flat_data = handle["flat_param.data"]
                if (num_padding := handle["flat_param._shard_numel_padded"]) > 0:
                    # If there's padding in the flat param it should be on the right.
                    assert (flat_data[-num_padding:] == 0).all()
                # NOTE: this changes depending on the torch version, but we don't do a version
                # check since we might be trying to unshard an old checkpoint that was stored
                # with a different torch version than we're currently running with.
                if "flat_param._shard_indices" in handle:
                    # torch <=2.0.1
                    param_start = handle["flat_param._shard_indices"][0]
                    current_flat_index = 0
                    for relative_fqn, full_shape, (offset_start, offset_end) in zip(
                        handle["flat_param._fqns"][param_start:],
                        handle["flat_param._shapes"][param_start:],
                        handle["flat_param._shard_param_offsets"],
                    ):
                        root_fqn = relative_fqn if not module_prefix else f"{module_prefix}.{relative_fqn}"
                        numel_shard = offset_end - offset_start + 1
                        flat_param_shard = _FlatParamShard(
                            full_shape=full_shape,
                            shard_offsets=(offset_start, offset_end),
                            shard_data=flat_data[current_flat_index : current_flat_index + numel_shard],
                        )
                        current_flat_index += numel_shard
                        yield root_fqn, flat_param_shard
                else:
                    # torch >=2.1.0
                    for relative_fqn, full_shape, shard_param_info in zip(
                        handle["flat_param._fqns"],
                        handle["flat_param._shapes"],
                        handle["flat_param._shard_param_infos"],
                    ):
                        if not shard_param_info.in_shard:
                            continue
                        root_fqn = relative_fqn if not module_prefix else f"{module_prefix}.{relative_fqn}"
                        flat_param_shard = _FlatParamShard(
                            full_shape=full_shape,
                            shard_offsets=(
                                shard_param_info.intra_param_start_idx,
                                shard_param_info.intra_param_end_idx,
                            ),
                            shard_data=flat_data[
                                shard_param_info.offset_in_shard : shard_param_info.offset_in_shard
                                + shard_param_info.numel_in_shard
                            ],
                        )
                        yield root_fqn, flat_param_shard

    def unshard_checkpoint(
        self,
        load_path: PathOrStr,
        *,
        local_cache: Optional[PathOrStr] = None,
        load_optimizer_state: bool = True,
        load_trainer_state: bool = True,
        device: Optional[torch.device] = None,
    ) -> Tuple[Dict[str, torch.Tensor], Optional[Dict[str, Any]], Optional[Dict[str, Any]]]:
        device = device or torch.device("cpu")
        metadata = self._load_metadata(load_path, local_cache=local_cache)

        # Gather paths to model state, potentially downloading them.
        log.info("Gathering model state dicts...")
        model_state_paths = self._gather_state_dict_paths(
            load_path, "model", metadata.world_size, local_cache=local_cache
        )

        # Load model state dicts one-by-one, materializing and populating the full parameters as we go.
        log.info("Materializing full parameters...")
        full_model_state: Dict[str, torch.Tensor] = {}
        # We keep a copy of the flat param metadata minus the actual tensors so we can reconstruct
        # the full optimizer state below without having to reload the model state dicts.
        flat_params_data: Dict[int, Dict[str, _FlatParamShard]] = defaultdict(dict)
        for rank, path in enumerate(model_state_paths):
            log.info(f"Loading shards from rank {rank}...")
            model_state = torch.load(path, map_location="cpu")
            for root_fqn, flat_param_shard in self._iter_flat_param_shards(model_state):
                if root_fqn not in full_model_state:
                    log.info(
                        f"Materializing full parameter '{root_fqn}' with shape {flat_param_shard.full_shape}..."
                    )
                    assert flat_param_shard.shard_data is not None
                    full_model_state[root_fqn] = torch.empty(
                        flat_param_shard.full_shape, dtype=flat_param_shard.shard_data.dtype, device=device
                    )
                    # Fill with NaNs so we can validate that the whole parameter has been populated
                    # afterwards.
                    full_model_state[root_fqn].fill_(torch.nan)
                # Copy over the local shard to the relevant part of the full parameter.
                full_param = full_model_state[root_fqn]
                log.info(f"Loading rank {rank} shard for '{root_fqn}'...")
                flat_param_shard.copy_into(full_param)
                flat_params_data[rank][root_fqn] = replace(flat_param_shard, shard_data=None)

        log.info("Validating full parameters...")
        for key, tensor in full_model_state.items():
            if torch.isnan(tensor).any():
                raise ValueError(f"Parameter '{key}' contains NaNs, this is likely a bug with the unsharder")

        trainer_state: Optional[Dict[str, Any]] = None
        if load_trainer_state:
            trainer_state = load_state_dict(load_path, "train/rank0.pt", local_cache=local_cache)

        if not load_optimizer_state:
            return full_model_state, None, trainer_state

        log.info("Gathering optim state dicts...")
        optim_state_paths = self._gather_state_dict_paths(
            load_path, "optim", metadata.world_size, local_cache=local_cache
        )

        log.info("Materializing full optim state...")
        full_optim_state: Dict[str, Any] = {"state": defaultdict(dict)}
        fqn_to_id: Dict[str, int] = {}
        id_to_fqn: Dict[int, str] = {}
        for rank, path in enumerate(optim_state_paths):
            log.info(f"Loading sharded optim state from rank {rank}...")
            optim_state = torch.load(path, map_location="cpu")

            # Initialize param groups.
            # We assume parameter groups are the same across all ranks.
            # The only thing that differs across ranks is the state for each local sharded param.
            if "param_groups" not in full_optim_state:
                full_optim_state["param_groups"] = optim_state["param_groups"]
            else:
                assert full_optim_state["param_groups"] == optim_state["param_groups"]

            # Generate mapping of parameter FQNs to optimizer param IDs and vice-versa.
            if not fqn_to_id or not id_to_fqn:
                for group in full_optim_state["param_groups"]:
                    for fqn, id in zip(group["param_names"], group["params"]):
                        fqn = fqn.replace("_fsdp_wrapped_module.", "")
                        fqn_to_id[fqn] = id
                        id_to_fqn[id] = fqn

            # Iterate over local shard state and copy into the full state.
            for id, shard_state in optim_state["state"].items():
                fqn = id_to_fqn[id]
                flat_param_shard = flat_params_data[rank].get(fqn)  # type: ignore[assignment]
                full_state = full_optim_state["state"][id]
                for key, shard_value in shard_state.items():
                    assert isinstance(shard_value, torch.Tensor)
                    if shard_value.shape == torch.Size([]):
                        # Add singleton tensors directly to full state. These should be the same across
                        # all ranks.
                        assert key in ("step", "grad_norm_exp_avg")  # sanity check
                        if key not in full_state:
                            full_state[key] = shard_value.to(device)
                        else:
                            assert full_state[key] == shard_value
                    else:
                        # Otherwise we have a sharded param state.
                        # If the corresponding full param state hasn't been materialized yet, do so now.
                        assert flat_param_shard is not None, f"missing flat_params_data for {fqn} from rank {rank}"
                        if key not in full_state:
                            log.info(
                                f"Materializing full state '{key}' for '{fqn}' with shape {flat_param_shard.full_shape}..."
                            )
                            full_state[key] = torch.empty(
                                flat_param_shard.full_shape, dtype=shard_value.dtype, device=device
                            )
                        full_state_value = full_state[key]

                        # Copy over the local shard state to the relevant part of the full parameter state.
                        log.info(f"Loading rank {rank} shard state of '{key}' for '{fqn}'...")
                        replace(flat_param_shard, shard_data=shard_value).copy_into(full_state_value)

        # Lastly, clean up the parameter names in param groups.
        for group in full_optim_state["param_groups"]:
            group["param_names"] = [n.replace("_fsdp_wrapped_module.", "") for n in group["param_names"]]

        return full_model_state, full_optim_state, trainer_state

    def _get_state_dict_path(
        self,
        load_path: PathOrStr,
        state_dict_type: str,
        rank: int,
        *,
        local_cache: Optional[PathOrStr] = None,
        progress=None,
    ) -> Tuple[int, Path]:
        fname = f"{state_dict_type}/rank{rank}.pt"
        return rank, resource_path(str(load_path).rstrip("/"), fname, local_cache=local_cache, progress=progress)

    def _gather_state_dict_paths(
        self,
        load_path: PathOrStr,
        state_dict_type: str,
        world_size: int,
        *,
        local_cache: Optional[PathOrStr] = None,
    ) -> List[Path]:
        progress = get_progress_bar()
        with ThreadPoolExecutor(max_workers=self.thread_count) as executor:
            futures = []
            for rank in range(world_size):
                future = executor.submit(
                    self._get_state_dict_path,
                    load_path,
                    state_dict_type,
                    rank,
                    local_cache=local_cache,
                    progress=progress,
                )
                futures.append(future)

            results: Dict[int, Path] = {}
            for future in as_completed(futures):
                rank, path = future.result()
                results[rank] = path

        return [results[rank] for rank in range(world_size)]
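
# Note on key names (illustrative): FSDP prefixes module names with "_fsdp_wrapped_module.",
# and the unsharding code above strips that prefix so that the reconstructed keys match the
# plain (unwrapped) model. The parameter name below is only an example:
#
#   fqn = "_fsdp_wrapped_module.transformer.blocks.0.att_proj.weight"
#   fqn.replace("_fsdp_wrapped_module.", "")
#   # -> "transformer.blocks.0.att_proj.weight"

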
class OlmoCoreCheckpointer(Checkpointer):
    def save_checkpoint(
        self,
        dir: PathOrStr,
        dist_model: nn.Module,
        optim: Optimizer,
        trainer_state: Dict[str, Any],
        *,
        upload_to: Optional[str] = None,
    ) -> None:
        from olmo_core.distributed.checkpoint import (  # type: ignore
            save_model_and_optim_state,
        )

        with self._temporary_wd(dir) as checkpoint_dir:
            log.info("Saving model and optim state...")
            if get_fs_local_rank() == 0:
                (checkpoint_dir / "model").mkdir(exist_ok=True, parents=True)
                (checkpoint_dir / "optim").mkdir(exist_ok=True, parents=True)
                (checkpoint_dir / "train").mkdir(exist_ok=True, parents=True)

            wait_for(
                lambda: (checkpoint_dir / "model").exists(), "Waiting for checkpoint model directory", timeout=10.0
            )
            wait_for(
                lambda: (checkpoint_dir / "optim").exists(), "Waiting for checkpoint optim directory", timeout=10.0
            )
            wait_for(
                lambda: (checkpoint_dir / "train").exists(), "Waiting for checkpoint train directory", timeout=10.0
            )

            local_files_created = save_model_and_optim_state(checkpoint_dir, dist_model, optim)
            if upload_to is not None:
                for path in local_files_created:
                    path = Path(path)
                    upload_target = f"{upload_to.rstrip('/')}/{path.relative_to(checkpoint_dir)}"
                    log.info(f"Uploading {path} to {upload_target}...")
                    upload(path, upload_target, save_overwrite=self.cfg.save_overwrite)

            log.info("Saving trainer state...")
            save_state_dict(
                checkpoint_dir,
                f"train/rank{get_global_rank()}.pt",
                trainer_state,
                upload_to=upload_to,
            )

            self._save_config(checkpoint_dir, upload_to=upload_to)

    def restore_checkpoint(
        self,
        load_path: PathOrStr,
        dist_model: nn.Module,
        optim: Optimizer,
        *,
        local_cache: Optional[PathOrStr] = None,
        load_optimizer_state: bool = True,
    ) -> Dict[str, Any]:
        from olmo_core.distributed.checkpoint import (  # type: ignore
            load_model_and_optim_state,
        )

        log.info("Loading model and optim state...")
        load_model_and_optim_state(load_path, dist_model, optim if load_optimizer_state else None)

        log.info("Loading trainer state...")
        try:
            trainer_state = load_state_dict(
                load_path, f"train/rank{get_global_rank()}.pt", local_cache=local_cache
            )
        except FileNotFoundError:
            # Fall back to rank 0 train state.
            # This can happen when we're restoring a checkpoint with a different world size.
            trainer_state = load_state_dict(load_path, "train/rank0.pt", local_cache=local_cache)

        barrier()
        return trainer_state

    def unshard_checkpoint(
        self,
        load_path: PathOrStr,
        *,
        local_cache: Optional[PathOrStr] = None,
        load_optimizer_state: bool = True,
        load_trainer_state: bool = True,
        device: Optional[torch.device] = None,
    ) -> Tuple[Dict[str, torch.Tensor], Optional[Dict[str, Any]], Optional[Dict[str, Any]]]:
        from olmo_core.distributed.checkpoint import (  # type: ignore
            unshard_model_state,
            unshard_optim_state,
        )

        model_state = unshard_model_state(load_path, device=device)
        optim_state: Optional[Dict[str, Any]] = None
        train_state: Optional[Dict[str, Any]] = None
        if load_optimizer_state:
            optim_state = cast(Dict[str, Any], unshard_optim_state(load_path, device=device))
        if load_trainer_state:
            train_state = load_state_dict(load_path, "train/rank0.pt", local_cache=local_cache)
        return model_state, optim_state, train_state


def build_sharded_checkpointer(
    cfg: TrainConfig, *, name: Optional[ShardedCheckpointerType] = None, use_shared_mem_impl: bool = False
) -> Checkpointer:
    name = name or cfg.sharded_checkpointer
    if name == ShardedCheckpointerType.torch_new:
        return TorchNewStyleShardedCheckpointer(cfg)
    elif name == ShardedCheckpointerType.torch_legacy:
        return TorchLegacyShardedCheckpointer(cfg, use_shared_mem_impl=use_shared_mem_impl)
    elif name == ShardedCheckpointerType.local:
        return LocalShardedCheckpointer(cfg)
    elif name == ShardedCheckpointerType.olmo_core:
        return OlmoCoreCheckpointer(cfg)
    else:
        raise NotImplementedError(name)
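
# Selection sketch (hypothetical): the factory above picks the implementation from the train
# config, so callers normally only need something like:
#
#   checkpointer = build_sharded_checkpointer(cfg)  # uses cfg.sharded_checkpointer
#   legacy = build_sharded_checkpointer(cfg, name=ShardedCheckpointerType.torch_legacy)
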
config.json
CHANGED
@@ -1,29 +1,143 @@
{
  "auto_map": {
    "AutoConfig": "config_molmoe.MolmoConfig",
    "AutoModelForCausalLM": "modeling_molmoe.MolmoForCausalLM"
  },
  "activation_type": "swiglu",
  "additional_vocab_size": 128,
  "alibi": false,
  "alibi_bias_max": 8.0,
  "always_start_with_space": true,
  "architectures": [
    "OLMoForCausalLM"
  ],
  "attention_dropout": 0.0,
  "attention_layer_norm": true,
  "attention_layer_norm_with_affine": true,
  "attention_type": "sdpa",
  "attn_logit_softcapping": null,
  "bias_for_layer_norm": false,
  "block_group_size": 1,
  "block_type": "moe",
  "clip_qkv": null,
  "crop_mode": "overlap-and-resize-c2",
  "d_model": 2048,
  "default_inference_len": 65,
  "do_random_scale": false,
  "embedding_dropout": 0.0,
  "embedding_size": 50304,
  "final_logit_softcapping": null,
  "fix_image_input_idx": 2,
  "float32_attention": true,
  "gin_bindings": null,
  "head_dim": null,
  "image_feature_dropout": 0.0,
  "image_padding_embed": "pad_and_partial_pad",
  "image_pooling_2d": "attention-meanq",
  "image_pooling_h": 2,
  "image_pooling_w": 2,
  "image_projector": "mlp",
  "include_bias": false,
  "init_cutoff_factor": 3.0,
  "init_device": "meta",
  "init_fn": "normal",
  "init_std": 0.02,
  "initializer_range": 0.02,
  "layer_norm_eps": 1e-05,
  "layer_norm_type": "rms",
  "layer_norm_with_affine": true,
  "llm_load_path": null,
  "loss_token_weighting": "root_subsegments",
  "low_cpu_fsdp": true,
  "max_crops": 12,
  "max_position_embeddings": 32768,
  "max_sequence_length": 4096,
  "message_formatting": "role",
  "mlp_hidden_size": null,
  "mlp_ratio": 1,
  "model_type": "molmo",
  "moe_capacity_factor": 1.25,
  "moe_dropless": true,
  "moe_interleave": false,
  "moe_lbl_in_fp32": false,
  "moe_log_expert_assignment": false,
  "moe_loss_weight": 0.0,
  "moe_mlp_impl": "sparse",
  "moe_num_experts": 64,
  "moe_shared_expert": false,
  "moe_top_k": 8,
  "moe_zloss_weight": 0.0,
  "multi_query_attention": null,
  "n_heads": 16,
  "n_kv_heads": null,
  "n_layers": 16,
  "new_embedding_init_range": 0.02,
  "norm_after": false,
  "normalize_input_embeds": false,
  "overlap_margins": [
    4,
    4
  ],
  "pad_to": null,
  "pad_token_id": 1,
  "pad_tokenizer": false,
  "precision": "amp_bf16",
  "prompt_override": null,
  "prompt_type": "uber_model",
  "qkv_bias": false,
  "query_pre_attn_scalar": 224,
  "residual_dropout": 0.1,
  "response_attention_dropout": 0.0,
  "response_residual_dropout": 0.0,
  "rope": true,
  "rope_full_precision": true,
  "rope_impl": "llama",
  "rope_theta": 10000.0,
  "scale_logits": false,
  "system_prompt_kind": "demo_or_style",
  "tokenizer": {
    "identifier": "allenai/gpt-neox-olmo-dolma-v1_5",
    "olmo_bos_token_id": null,
    "olmo_eos_token_id": null,
    "tokenizer_adds_space": false,
    "tokenizer_dir": null,
    "truncate_direction": "right"
  },
  "transformers_version": "4.45.0.dev0",
  "unconditioned": false,
  "use_cache": true,
  "use_cls_feature": false,
  "use_col_tokens": true,
  "use_position_ids": true,
  "vision_backbone": {
    "attention_dropout": 0.0,
    "fsdp_wrap": false,
    "image_default_input_size": [
      336,
      336
    ],
    "image_dropout_rate": 0.0,
    "image_emb_dim": 1024,
    "image_head_dim": 64,
    "image_mlp_activations": "quick_gelu",
    "image_mlp_dim": 4096,
    "image_model_type": "openai",
    "image_norm_eps": 1e-05,
    "image_num_heads": 16,
    "image_num_key_value_heads": 16,
    "image_num_layers": 23,
    "image_num_pos": 577,
    "image_patch_size": 14,
    "image_pos_patch_size": 14,
    "initializer_range": 0.02,
    "residual_dropout": 0.0,
    "resize_mode": "default"
  },
  "vit_layers": [
    -2,
    -9
  ],
  "vit_load_path": null,
  "vocab_size": 50280,
  "weight_tying": false
}
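A hedged usage sketch for the config above (assumptions: the repo id is a placeholder, and loading custom code from the hub is acceptable in your environment). Because `auto_map` points at `config_molmoe.MolmoConfig` and `modeling_molmoe.MolmoForCausalLM`, loading through transformers requires `trust_remote_code=True`:

    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained("<repo-id>", trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained("<repo-id>", trust_remote_code=True)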
config_molmoe.py
CHANGED
|
@@ -1,90 +1,909 @@
|
|
| 1-90 |
-
(previous config_molmoe.py removed: the old file's 90 lines are not rendered in this diff view)
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
from dataclasses import asdict, dataclass, field
|
| 5 |
+
from glob import glob
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import (
|
| 8 |
+
Any,
|
| 9 |
+
Dict,
|
| 10 |
+
Iterable,
|
| 11 |
+
List,
|
| 12 |
+
Optional,
|
| 13 |
+
Tuple,
|
| 14 |
+
Type,
|
| 15 |
+
TypeVar,
|
| 16 |
+
Union,
|
| 17 |
+
cast,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
from transformers import PretrainedConfig
|
| 22 |
+
from omegaconf import DictConfig, ListConfig, OmegaConf
|
| 23 |
+
from omegaconf import OmegaConf as om
|
| 24 |
+
from omegaconf.errors import OmegaConfBaseException
|
| 25 |
+
from torch.distributed.fsdp import MixedPrecision, ShardingStrategy
|
| 26 |
+
import gin
|
| 27 |
+
|
| 28 |
+
#from olmo.aliases import PathOrStr
|
| 29 |
+
from .aliases import PathOrStr
|
| 30 |
+
from olmo.exceptions import OLMoConfigurationError
|
| 31 |
+
from olmo.util import StrEnum, resource_path
|
| 32 |
+
|
| 33 |
+
from olmo.mm_data.data_utils import build_tokenizer
|
| 34 |
+
from olmo.multimodal_preprocessor import MultiModalPreprocessor
|
| 35 |
+
|
| 36 |
+
__all__ = [
|
| 37 |
+
"ActivationType",
|
| 38 |
+
"ActivationCheckpointingStrategy",
|
| 39 |
+
"BlockType",
|
| 40 |
+
"LayerNormType",
|
| 41 |
+
"VisionBackboneType",
|
| 42 |
+
"VisionBackboneConfig",
|
| 43 |
+
"InitFnType",
|
| 44 |
+
"ModelConfig",
|
| 45 |
+
"OptimizerType",
|
| 46 |
+
"OptimizerConfig",
|
| 47 |
+
"SchedulerType",
|
| 48 |
+
"SchedulerConfig",
|
| 49 |
+
"DataConfig",
|
| 50 |
+
"InstanceFilterConfig",
|
| 51 |
+
"EvaluatorConfig",
|
| 52 |
+
"TokenizerConfig",
|
| 53 |
+
"TrainConfig",
|
| 54 |
+
"PaddingDirection",
|
| 55 |
+
"TruncationDirection",
|
| 56 |
+
"SpeedMonitorConfig",
|
| 57 |
+
"WandbConfig",
|
| 58 |
+
"CompilerConfig",
|
| 59 |
+
"WandbConfig",
|
| 60 |
+
"FSDPPrecision",
|
| 61 |
+
"FSDPWrapStrategy",
|
| 62 |
+
"FSDPConfig",
|
| 63 |
+
"CheckpointType",
|
| 64 |
+
]
|
| 65 |
+
|
| 66 |
+
C = TypeVar("C", bound="BaseConfig")
|
| 67 |
+
D = TypeVar("D", bound="DictConfig|ListConfig")
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class AttentionType(StrEnum):
|
| 71 |
+
sdpa = "sdpa"
|
| 72 |
+
direct = "direct"
|
| 73 |
+
flash = "flash"
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class BaseConfig:
|
| 77 |
+
@classmethod
|
| 78 |
+
def _register_resolvers(cls, validate_paths: bool = True):
|
| 79 |
+
# Expands path globs into a list.
|
| 80 |
+
def path_glob(*paths) -> List[str]:
|
| 81 |
+
out = []
|
| 82 |
+
for path in paths:
|
| 83 |
+
matches = sorted(glob(path))
|
| 84 |
+
if not matches and validate_paths:
|
| 85 |
+
raise FileNotFoundError(f"{path} does not match any files or dirs")
|
| 86 |
+
out.extend(matches)
|
| 87 |
+
return out
|
| 88 |
+
|
| 89 |
+
# Chooses the first path in the arguments that exists.
|
| 90 |
+
def path_choose(*paths) -> str:
|
| 91 |
+
from .util import is_url
|
| 92 |
+
|
| 93 |
+
for path in paths:
|
| 94 |
+
if is_url(path) or Path(path).exists():
|
| 95 |
+
return path
|
| 96 |
+
if validate_paths:
|
| 97 |
+
raise FileNotFoundError(", ".join(paths))
|
| 98 |
+
else:
|
| 99 |
+
return ""
|
| 100 |
+
|
| 101 |
+
# Finds the latest checkpoint in a folder.
|
| 102 |
+
def path_last_checkpoint(path) -> str:
|
| 103 |
+
from .util import find_latest_checkpoint
|
| 104 |
+
|
| 105 |
+
latest_checkpoint = find_latest_checkpoint(path)
|
| 106 |
+
if latest_checkpoint is None:
|
| 107 |
+
if validate_paths:
|
| 108 |
+
raise FileNotFoundError(f"Could not find a latest checkpoint at {path}")
|
| 109 |
+
else:
|
| 110 |
+
return ""
|
| 111 |
+
else:
|
| 112 |
+
return str(latest_checkpoint)
|
| 113 |
+
|
| 114 |
+
om.register_new_resolver("path.glob", path_glob, replace=True)
|
| 115 |
+
om.register_new_resolver("path.choose", path_choose, replace=True)
|
| 116 |
+
om.register_new_resolver("path.last_checkpoint", path_last_checkpoint, replace=True)
|
| 117 |
+
|
| 118 |
+
@classmethod
|
| 119 |
+
def update_legacy_settings(cls, config: D) -> D:
|
| 120 |
+
"""
|
| 121 |
+
Update the legacy config settings whose schemas have undergone backwards-incompatible changes.
|
| 122 |
+
"""
|
| 123 |
+
return config
|
| 124 |
+
|
| 125 |
+
@classmethod
|
| 126 |
+
def new(cls: Type[C], **kwargs) -> C:
|
| 127 |
+
cls._register_resolvers()
|
| 128 |
+
conf = om.structured(cls)
|
| 129 |
+
try:
|
| 130 |
+
if kwargs:
|
| 131 |
+
conf = om.merge(conf, kwargs)
|
| 132 |
+
return cast(C, om.to_object(conf))
|
| 133 |
+
except OmegaConfBaseException as e:
|
| 134 |
+
raise OLMoConfigurationError(str(e))
|
| 135 |
+
|
| 136 |
+
@classmethod
|
| 137 |
+
def load(
|
| 138 |
+
cls: Type[C],
|
| 139 |
+
path: PathOrStr,
|
| 140 |
+
overrides: Optional[List[str]] = None,
|
| 141 |
+
key: Optional[str] = None,
|
| 142 |
+
validate_paths: bool = True,
|
| 143 |
+
) -> C:
|
| 144 |
+
"""Load from a YAML file."""
|
| 145 |
+
cls._register_resolvers(validate_paths=validate_paths)
|
| 146 |
+
schema = om.structured(cls)
|
| 147 |
+
try:
|
| 148 |
+
raw = om.load(str(path))
|
| 149 |
+
|
| 150 |
+
# Backwards compatibility hack, we need this here not in `update_legacy_settings`
|
| 151 |
+
# since it has to be applied before selecting with `key`
|
| 152 |
+
if "tokenizer" in raw and "model" in raw:
|
| 153 |
+
raw["model"]["tokenizer"] = raw.pop("tokenizer")
|
| 154 |
+
|
| 155 |
+
if key is not None:
|
| 156 |
+
raw = raw[key] # type: ignore
|
| 157 |
+
raw = cls.update_legacy_settings(raw)
|
| 158 |
+
conf = om.merge(schema, raw)
|
| 159 |
+
if overrides:
|
| 160 |
+
conf = om.merge(conf, om.from_dotlist(overrides))
|
| 161 |
+
return cast(C, om.to_object(conf))
|
| 162 |
+
except OmegaConfBaseException as e:
|
| 163 |
+
raise OLMoConfigurationError(str(e))
|
| 164 |
+
|
| 165 |
+
def save(self, path: PathOrStr) -> None:
|
| 166 |
+
"""Save to a YAML file."""
|
| 167 |
+
om.save(config=self, f=str(path))
|
| 168 |
+
|
| 169 |
+
def asdict(self, exclude: Optional[Iterable[str]] = None) -> Dict[str, Any]:
|
| 170 |
+
out = asdict(self) # type: ignore
|
| 171 |
+
if exclude is not None:
|
| 172 |
+
for name in exclude:
|
| 173 |
+
if name in out:
|
| 174 |
+
del out[name]
|
| 175 |
+
return out
|
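BaseConfig gives every config class the same OmegaConf-backed entry points: `new(**kwargs)` for in-code construction, `load(path, overrides=..., key=...)` for YAML files (with the `path.glob`, `path.choose` and `path.last_checkpoint` resolvers usable inside the YAML via OmegaConf's `${...}` syntax), and `save`/`asdict` for round-tripping. A hedged usage sketch; the YAML file name and the dotlist overrides are made up for illustration:

# hypothetical file and overrides, shown only to illustrate the BaseConfig API above
cfg = ModelConfig.load(
    "molmoe_train.yaml",                               # assumed YAML with a top-level "model" section
    key="model",                                       # select that sub-tree before merging into the schema
    overrides=["n_layers=16", "moe_num_experts=64"],   # dotlist-style overrides
)
cfg.save("resolved_model.yaml")                        # write the merged config back out
as_dict = cfg.asdict(exclude=["tokenizer"])            # plain dict without the tokenizer sub-config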
| 176 |
+
|
| 177 |
+
|
| 178 |
+
class LayerNormType(StrEnum):
|
| 179 |
+
default = "default"
|
| 180 |
+
"""
|
| 181 |
+
The default LayerNorm implementation, equivalent to PyTorch's built-in version.
|
| 182 |
+
"""
|
| 183 |
+
|
| 184 |
+
low_precision = "low_precision"
|
| 185 |
+
"""
|
| 186 |
+
A low-precision version of the default LayerNorm.
|
| 187 |
+
"""
|
| 188 |
+
|
| 189 |
+
rms = "rms"
|
| 190 |
+
"""
|
| 191 |
+
An RMSNorm implementation. When using ``torch.compile`` this is
|
| 192 |
+
probably the fastest implementation.
|
| 193 |
+
"""
|
| 194 |
+
|
| 195 |
+
gemma_rms = "gemma_rms"
|
| 196 |
+
"""
|
| 197 |
+
A GemmaRMSNorm implementation. When using ``torch.compile`` this is
|
| 198 |
+
probably the fastest implementation.
|
| 199 |
+
"""
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
class ActivationType(StrEnum):
|
| 203 |
+
quick_gelu = "quick_gelu"
|
| 204 |
+
gelu = "gelu"
|
| 205 |
+
gelu_tanh = "gelu_tanh"
|
| 206 |
+
relu = "relu"
|
| 207 |
+
silu = "silu"
|
| 208 |
+
llama_geglu = "llama_geglu"
|
| 209 |
+
llama_geglu_tanh = "llama_geglu_tanh"
|
| 210 |
+
llama_swiglu = "llama_swiglu"
|
| 211 |
+
swiglu = "swiglu"
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
class BlockType(StrEnum):
|
| 215 |
+
sequential = "sequential"
|
| 216 |
+
|
| 217 |
+
llama = "llama"
|
| 218 |
+
"""
|
| 219 |
+
A block similar to the sequential block with slightly different
|
| 220 |
+
implementations of operations like attention to imitate the behavior of Llama.
|
| 221 |
+
"""
|
| 222 |
+
|
| 223 |
+
gemma = "gemma"
|
| 224 |
+
"""
|
| 225 |
+
A block similar to the sequential block with slightly different
|
| 226 |
+
implementations of operations like attention to imitate the behavior of Gemma.
|
| 227 |
+
"""
|
| 228 |
+
|
| 229 |
+
moe = "moe"
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
class InitFnType(StrEnum):
|
| 233 |
+
mitchell = "mitchell"
|
| 234 |
+
"""
|
| 235 |
+
The strategy suggested to us by Mitchell Wortsman from UW.
|
| 236 |
+
This uses a truncated normal distribution with an adaptive standard deviation that depends
|
| 237 |
+
on the size of the weights as well as the depth of the layer.
|
| 238 |
+
"""
|
| 239 |
+
|
| 240 |
+
normal = "normal"
|
| 241 |
+
"""
|
| 242 |
+
All weights are initialized from the same normal distribution.
|
| 243 |
+
"""
|
| 244 |
+
|
| 245 |
+
kaiming_normal = "kaiming_normal"
|
| 246 |
+
"""
|
| 247 |
+
All weights are initialized with the Kaiming method from a normal distribution.
|
| 248 |
+
Note this currently won't work with FSDP.
|
| 249 |
+
"""
|
| 250 |
+
|
| 251 |
+
fan_in = "fan_in"
|
| 252 |
+
"""
|
| 253 |
+
"Fan-in variance scaling", i.e. normal with a standard deviation of ``1/sqrt(d_in)`` where ``d_in``
|
| 254 |
+
is the input dimensionality of the kernel.
|
| 255 |
+
"""
|
| 256 |
+
|
| 257 |
+
full_megatron = "full_megatron"
|
| 258 |
+
"""
|
| 259 |
+
This is what metaseq calls "full megatron init". It is the init used for Llama 2.
|
| 260 |
+
"""
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
class VisionBackboneType(StrEnum):
|
| 264 |
+
openai = "openai"
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
class ImagePaddingEmbed(StrEnum):
|
| 268 |
+
pad_and_partial_pad = "pad_and_partial_pad"
|
| 269 |
+
pad_embed = "pad_embed"
|
| 270 |
+
regress = "regress"
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
class ImagePooling2DType(StrEnum):
|
| 274 |
+
attention = "attention"
|
| 275 |
+
attention_meanq = "attention-meanq"
|
| 276 |
+
attention_2wide = "attention_2wide"
|
| 277 |
+
attention_v2 = "attention-v2"
|
| 278 |
+
none = "none"
|
| 279 |
+
stack = "stack"
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
class ImageProjectType(StrEnum):
|
| 283 |
+
mlp = "mlp"
|
| 284 |
+
mlpx2 = "2mlp"
|
| 285 |
+
linear = "linear"
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
@dataclass
|
| 289 |
+
class VisionBackboneConfig(BaseConfig):
|
| 290 |
+
image_model_type: VisionBackboneType = VisionBackboneType.openai
|
| 291 |
+
image_default_input_size: Tuple[int, int] = (336, 336)
|
| 292 |
+
image_patch_size: int = 14
|
| 293 |
+
image_pos_patch_size: int = 14
|
| 294 |
+
image_emb_dim: int = 1024
|
| 295 |
+
image_num_heads: int = 16
|
| 296 |
+
image_num_key_value_heads: int = 16
|
| 297 |
+
image_num_layers: int = 24
|
| 298 |
+
image_head_dim: int = 64
|
| 299 |
+
image_mlp_dim: int = 4096
|
| 300 |
+
image_mlp_activations: ActivationType = ActivationType.gelu
|
| 301 |
+
image_dropout_rate: float = 0.0
|
| 302 |
+
image_num_pos: int = 577
|
| 303 |
+
image_norm_eps: float = 1e-5
|
| 304 |
+
attention_dropout: float = 0.0
|
| 305 |
+
residual_dropout: float = 0.0
|
| 306 |
+
initializer_range: float = 0.02
|
| 307 |
+
fsdp_wrap: bool = False
|
| 308 |
+
|
| 309 |
+
# how to preprocess images for this ViT
|
| 310 |
+
resize_mode: str = "default"
|
| 311 |
+
|
| 312 |
+
def __post_init__(self):
|
| 313 |
+
self.image_default_input_size = tuple(self.image_default_input_size) # type: ignore[assignment]
|
| 314 |
+
|
| 315 |
+
@property
|
| 316 |
+
def image_num_patch(self):
|
| 317 |
+
h, w = self.image_default_input_size
|
| 318 |
+
return h // self.image_patch_size, w // self.image_patch_size
|
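With the defaults above (336x336 input, 14-pixel patches), `image_num_patch` works out to a 24x24 grid, and 24*24 = 576 patch positions plus a CLS position matches the default `image_num_pos = 577`. A quick sketch:

vb = VisionBackboneConfig()          # defaults: 336x336 input, patch size 14
print(vb.image_num_patch)            # (24, 24)
print(vb.image_num_pos)              # 577 = 24*24 patch tokens + 1 CLS token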
| 319 |
+
|
| 320 |
+
|
| 321 |
+
class TruncationDirection(StrEnum):
|
| 322 |
+
right = "right"
|
| 323 |
+
left = "left"
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
@dataclass
|
| 327 |
+
class TokenizerConfig(BaseConfig):
|
| 328 |
+
identifier: str = "gpt2"
|
| 329 |
+
truncate_direction: TruncationDirection = TruncationDirection.right
|
| 330 |
+
# Does the tokenizer automatically start input text with a space
|
| 331 |
+
tokenizer_adds_space: Optional[bool] = False
|
| 332 |
+
tokenizer_dir: Optional[str] = None # tokenizer directory if using a seqio tokenizer
|
| 333 |
+
olmo_bos_token_id: Optional[int] = None
|
| 334 |
+
olmo_eos_token_id: Optional[int] = None
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
@dataclass
|
| 338 |
+
class ModelConfig(BaseConfig):
|
| 339 |
+
"""
|
| 340 |
+
OLMo (model) configuration.
|
| 341 |
+
"""
|
| 342 |
+
|
| 343 |
+
# Note that the defaults for these attributes are equivalent to the base GPT2 model.
|
| 344 |
+
|
| 345 |
+
d_model: int = 768
|
| 346 |
+
"""
|
| 347 |
+
The hidden size of the model.
|
| 348 |
+
"""
|
| 349 |
+
|
| 350 |
+
n_heads: int = 12
|
| 351 |
+
"""
|
| 352 |
+
The number of self-attention heads.
|
| 353 |
+
"""
|
| 354 |
+
|
| 355 |
+
n_kv_heads: Optional[int] = None
|
| 356 |
+
"""
|
| 357 |
+
The number of heads to use for keys and values. Defaults to `n_heads`.
|
| 358 |
+
Set this to ``None`` or ``n_heads`` for normal multi-head attention.
|
| 359 |
+
Set this to 1 for multi-query attention.
|
| 360 |
+
Set it to some in-between value for Llama2-style grouped query attention.
|
| 361 |
+
"""
|
| 362 |
+
|
| 363 |
+
qkv_bias: bool = False # qwen models use bias in qkv layers
|
| 364 |
+
|
| 365 |
+
clip_qkv: Optional[float] = None
|
| 366 |
+
"""
|
| 367 |
+
Clip QKV to this value when set.
|
| 368 |
+
"""
|
| 369 |
+
|
| 370 |
+
n_layers: int = 12
|
| 371 |
+
"""
|
| 372 |
+
The number of layers/blocks.
|
| 373 |
+
"""
|
| 374 |
+
|
| 375 |
+
mlp_ratio: int = 4
|
| 376 |
+
"""
|
| 377 |
+
The ratio of the inner MLP dimensionality to ``d_model``.
|
| 378 |
+
This is only used when ``mlp_hidden_size`` is not set.
|
| 379 |
+
"""
|
| 380 |
+
|
| 381 |
+
mlp_hidden_size: Optional[int] = None
|
| 382 |
+
"""
|
| 383 |
+
Set the exact hidden size for the MLP. Otherwise the inner MLP hidden size will be set to `mlp_ratio * d_model`.
|
| 384 |
+
"""
|
| 385 |
+
|
| 386 |
+
activation_type: ActivationType = ActivationType.swiglu
|
| 387 |
+
"""
|
| 388 |
+
The activation function to use within the MLP layers.
|
| 389 |
+
"""
|
| 390 |
+
|
| 391 |
+
block_type: BlockType = BlockType.sequential
|
| 392 |
+
"""
|
| 393 |
+
The transformer block implementation.
|
| 394 |
+
"""
|
| 395 |
+
|
| 396 |
+
block_group_size: int = 1
|
| 397 |
+
"""
|
| 398 |
+
The number of blocks to group together into a single parent block.
|
| 399 |
+
This has no effect on the number of parameters in the model and is only used to wrap groups
|
| 400 |
+
of blocks together with a single FSDP wrapper during training.
|
| 401 |
+
"""
|
| 402 |
+
|
| 403 |
+
alibi: bool = False
|
| 404 |
+
"""
|
| 405 |
+
If ``True``, use ALiBi embeddings. Mutually exclusive with ``rope``.
|
| 406 |
+
"""
|
| 407 |
+
|
| 408 |
+
alibi_bias_max: float = 8.0
|
| 409 |
+
"""
|
| 410 |
+
Maximum absolute value of ALiBi bias.
|
| 411 |
+
"""
|
| 412 |
+
|
| 413 |
+
rope: bool = False
|
| 414 |
+
"""
|
| 415 |
+
Use rotary positional embeddings (RoPE). Mutually exclusive with ``alibi``.
|
| 416 |
+
"""
|
| 417 |
+
|
| 418 |
+
rope_full_precision: bool = True
|
| 419 |
+
"""
|
| 420 |
+
If ``True``, apply RoPE embeddings at full precision regardless of the input type. Otherwise,
|
| 421 |
+
apply RoPE at the precision of the input.
|
| 422 |
+
"""
|
| 423 |
+
|
| 424 |
+
rope_theta: float = 10000.
|
| 425 |
+
|
| 426 |
+
rope_impl: str = "cockatoo"
|
| 427 |
+
|
| 428 |
+
vision_backbone: Optional[VisionBackboneConfig] = None
|
| 429 |
+
"""
|
| 430 |
+
Vision backbone settings for multi-modal models.
|
| 431 |
+
"""
|
| 432 |
+
|
| 433 |
+
vit_load_path: Optional[str] = None
|
| 434 |
+
"""
|
| 435 |
+
Use this to load the vit model.
|
| 436 |
+
"""
|
| 437 |
+
|
| 438 |
+
llm_load_path: Optional[str] = None
|
| 439 |
+
"""
|
| 440 |
+
Use this to partially load the llm transformer.
|
| 441 |
+
"""
|
| 442 |
+
|
| 443 |
+
low_cpu_fsdp: bool = True
|
| 444 |
+
"""
|
| 445 |
+
If ``True``, we save CPU memory by loading the pretrained vision model on rank0 only
|
| 446 |
+
when init_device is `meta`.
|
| 447 |
+
If TrainConfig.load_path is set, this should be set to ``False`` (default: True)
|
| 448 |
+
"""
|
| 449 |
+
|
| 450 |
+
attention_type: AttentionType = AttentionType.sdpa
|
| 451 |
+
"""
|
| 452 |
+
Attention implementation to use.
|
| 453 |
+
"""
|
| 454 |
+
|
| 455 |
+
float32_attention: bool = True
|
| 456 |
+
"""
|
| 457 |
+
Compute attention in float32
|
| 458 |
+
"""
|
| 459 |
+
|
| 460 |
+
attention_dropout: float = 0.1
|
| 461 |
+
"""
|
| 462 |
+
The dropout probability within the attention modules.
|
| 463 |
+
"""
|
| 464 |
+
|
| 465 |
+
# Only apply dropout to response tokens
|
| 466 |
+
response_attention_dropout: float = 0.0
|
| 467 |
+
|
| 468 |
+
multi_query_attention: Optional[bool] = None
|
| 469 |
+
"""
|
| 470 |
+
Deprecated. Use n_kv_heads instead.
|
| 471 |
+
"""
|
| 472 |
+
|
| 473 |
+
attention_layer_norm: bool = False
|
| 474 |
+
"""
|
| 475 |
+
Apply layer norm to the keys and queries within the attention mechanism.
|
| 476 |
+
This can help stabilize training.
|
| 477 |
+
"""
|
| 478 |
+
|
| 479 |
+
residual_dropout: float = 0.1
|
| 480 |
+
"""
|
| 481 |
+
The dropout probability for the MLP and attention output within each block.
|
| 482 |
+
"""
|
| 483 |
+
|
| 484 |
+
# Only apply dropout to response tokens
|
| 485 |
+
response_residual_dropout: float = 0.0
|
| 486 |
+
|
| 487 |
+
embedding_dropout: float = 0.1
|
| 488 |
+
"""
|
| 489 |
+
The dropout probability for embeddings.
|
| 490 |
+
"""
|
| 491 |
+
|
| 492 |
+
layer_norm_type: LayerNormType = LayerNormType.default
|
| 493 |
+
"""
|
| 494 |
+
The layernorm implementation to use.
|
| 495 |
+
"""
|
| 496 |
+
|
| 497 |
+
layer_norm_with_affine: bool = True
|
| 498 |
+
"""
|
| 499 |
+
Whether to include bias and weight parameters for the layer norms.
|
| 500 |
+
This only affects layer norms that are immediately followed by a linear layer in the forward pass,
|
| 501 |
+
so everything except QK-norms. To turn off affines for QK norms as well, set :attr:`attention_layer_norm_with_affine`
|
| 502 |
+
to ``False``.
|
| 503 |
+
"""
|
| 504 |
+
|
| 505 |
+
layer_norm_eps: Optional[float] = None
|
| 506 |
+
|
| 507 |
+
attention_layer_norm_with_affine: bool = True
|
| 508 |
+
"""
|
| 509 |
+
Toggle affine transform for the QK norms.
|
| 510 |
+
"""
|
| 511 |
+
|
| 512 |
+
max_sequence_length: int = 1024
|
| 513 |
+
"""
|
| 514 |
+
The maximum input sequence length supported by the model.
|
| 515 |
+
"""
|
| 516 |
+
|
| 517 |
+
max_position_embeddings: Optional[int] = None
|
| 518 |
+
|
| 519 |
+
include_bias: bool = True
|
| 520 |
+
"""
|
| 521 |
+
Whether or not to include bias parameters in linear layers.
|
| 522 |
+
In PaLM, they got rid of all bias terms because they found that large
|
| 523 |
+
models tend to have near 0 bias terms anyway.
|
| 524 |
+
"""
|
| 525 |
+
|
| 526 |
+
bias_for_layer_norm: Optional[bool] = None
|
| 527 |
+
"""
|
| 528 |
+
Whether or not to include bias parameters in layer norm.
|
| 529 |
+
This is separate from the include_bias parameter, because of a ROCm crash when biases are disabled in
|
| 530 |
+
layer norm.
|
| 531 |
+
When this is None (the default), it inherits the setting from include_bias.
|
| 532 |
+
"""
|
| 533 |
+
|
| 534 |
+
scale_logits: bool = False
|
| 535 |
+
"""
|
| 536 |
+
If ``True``, scale the output logits by ``1 / sqrt(d_model)``.
|
| 537 |
+
"""
|
| 538 |
+
|
| 539 |
+
vocab_size: int = 50257
|
| 540 |
+
"""
|
| 541 |
+
Vocabulary size of the model.
|
| 542 |
+
"""
|
| 543 |
+
|
| 544 |
+
embedding_size: Optional[int] = 50304
|
| 545 |
+
"""
|
| 546 |
+
The number of embeddings, i.e. the number of tokens. If set to ``None`` it will default
|
| 547 |
+
to ``vocab_size``. If ``vocab_size`` is not a multiple of 128, setting this to the
|
| 548 |
+
next multiple of 128 that's greater than ``vocab_size`` can improve throughput
|
| 549 |
+
substantially.
|
| 550 |
+
"""
|
| 551 |
+
|
| 552 |
+
# For new special tokens
|
| 553 |
+
additional_vocab_size: Optional[int] = None
|
| 554 |
+
|
| 555 |
+
new_embedding_init_range: float = 0.02
|
| 556 |
+
"""
|
| 557 |
+
How to initialize embeddings for new tokens.
|
| 558 |
+
"""
|
| 559 |
+
|
| 560 |
+
weight_tying: bool = True
|
| 561 |
+
"""
|
| 562 |
+
Whether to tie output linear weights to the input embedding.
|
| 563 |
+
"""
|
| 564 |
+
|
| 565 |
+
pad_token_id: int = -1
|
| 566 |
+
"""
|
| 567 |
+
The ID of the token to use for padding. Defaults to the ID of the EOS token.
|
| 568 |
+
"""
|
| 569 |
+
|
| 570 |
+
init_device: Optional[str] = None
|
| 571 |
+
"""
|
| 572 |
+
The torch device to use when initializing the model parameters, e.g. "cpu", "cuda:0", "meta".
|
| 573 |
+
"""
|
| 574 |
+
|
| 575 |
+
init_fn: InitFnType = InitFnType.normal
|
| 576 |
+
"""
|
| 577 |
+
The weight initialization strategy.
|
| 578 |
+
"""
|
| 579 |
+
|
| 580 |
+
init_std: float = 0.02
|
| 581 |
+
"""
|
| 582 |
+
The standard deviation to use when initializing weights with a "fixed distribution" ``init_fn``, such
|
| 583 |
+
as "normal".
|
| 584 |
+
"""
|
| 585 |
+
|
| 586 |
+
init_cutoff_factor: Optional[float] = None
|
| 587 |
+
"""
|
| 588 |
+
A positive factor used to scale the cutoff values when initializing weights with a "fixed distribution" ``init_fn``, such
|
| 589 |
+
as "normal". Setting this to None means values are not cutoff.
|
| 590 |
+
"""
|
| 591 |
+
|
| 592 |
+
norm_after: bool = False
|
| 593 |
+
"""
|
| 594 |
+
Apply norm after the attention/feedforward layers rather than before, as introduced in the Swin transformer paper (Liu et al).
|
| 595 |
+
"""
|
| 596 |
+
|
| 597 |
+
precision: Optional[str] = None
|
| 598 |
+
"""
|
| 599 |
+
Precision used to train/evaluate with. You shouldn't set this directly.
|
| 600 |
+
See :data:`TrainConfig.precision` instead.
|
| 601 |
+
"""
|
| 602 |
+
|
| 603 |
+
moe_num_experts: Optional[int] = 8
|
| 604 |
+
"""
|
| 605 |
+
The number of experts to use in the MoE block.
|
| 606 |
+
"""
|
| 607 |
+
|
| 608 |
+
moe_top_k: Optional[int] = 2
|
| 609 |
+
"""
|
| 610 |
+
The number of experts to select for each token.
|
| 611 |
+
"""
|
| 612 |
+
|
| 613 |
+
moe_mlp_impl: Optional[str] = "sparse"
|
| 614 |
+
"""
|
| 615 |
+
Choose "grouped" for grouped GEMM installable via `pip install git+https://git@github.com/tgale96/grouped_gemm.git@66c7195e35e8c4f22fa6a014037ef511bfa397cb`.
|
| 616 |
+
"""
|
| 617 |
+
|
| 618 |
+
moe_log_expert_assignment: Optional[bool] = False
|
| 619 |
+
"""
|
| 620 |
+
Whether to log the expert assignment.
|
| 621 |
+
"""
|
| 622 |
+
|
| 623 |
+
moe_shared_expert: Optional[bool] = False
|
| 624 |
+
"""
|
| 625 |
+
Whether to have an always-used expert like in [DeepSeekMoE](https://arxiv.org/abs/2401.06066).
|
| 626 |
+
"""
|
| 627 |
+
|
| 628 |
+
moe_lbl_in_fp32: Optional[bool] = False
|
| 629 |
+
"""
|
| 630 |
+
Whether to perform load balancing in FP32.
|
| 631 |
+
"""
|
| 632 |
+
|
| 633 |
+
moe_interleave: Optional[bool] = False
|
| 634 |
+
"""
|
| 635 |
+
Interleave sequential with MoE blocks starting with sequential.
|
| 636 |
+
"""
|
| 637 |
+
|
| 638 |
+
moe_loss_weight: Optional[float] = 0.1
|
| 639 |
+
"""
|
| 640 |
+
The weight to use for the MoE load balancing loss.
|
| 641 |
+
"""
|
| 642 |
+
|
| 643 |
+
moe_zloss_weight: Optional[float] = None
|
| 644 |
+
"""
|
| 645 |
+
Weight for MoE router z-loss where None means no router z-loss. 0.001 is a common value.
|
| 646 |
+
"""
|
| 647 |
+
|
| 648 |
+
moe_dropless: Optional[bool] = True
|
| 649 |
+
"""
|
| 650 |
+
Whether to use [dMoE](https://arxiv.org/abs/2211.15841).
|
| 651 |
+
"""
|
| 652 |
+
|
| 653 |
+
moe_capacity_factor: Optional[float] = 1.25
|
| 654 |
+
"""
|
| 655 |
+
The capacity factor to use in the MoE block. Only applies if not using dMoE.
|
| 656 |
+
"""
|
| 657 |
+
|
| 658 |
+
# Image pre-processing options.
|
| 659 |
+
max_crops: int = 12
|
| 660 |
+
|
| 661 |
+
crop_mode: str = "patchify-v2-and-resize-c2"
|
| 662 |
+
|
| 663 |
+
do_random_scale: bool = True
|
| 664 |
+
|
| 665 |
+
use_col_tokens: bool = True
|
| 666 |
+
|
| 667 |
+
# How to prompt the model
|
| 668 |
+
prompt_type: str = "none"
|
| 669 |
+
|
| 670 |
+
# System prompt to use
|
| 671 |
+
system_prompt_kind: str = "style"
|
| 672 |
+
|
| 673 |
+
# How to format messages
|
| 674 |
+
message_formatting: str = "none"
|
| 675 |
+
|
| 676 |
+
always_start_with_space: bool = True
|
| 677 |
+
|
| 678 |
+
prompt_override: Optional[str] = None
|
| 679 |
+
|
| 680 |
+
default_inference_len: Optional[int] = 65
|
| 681 |
+
|
| 682 |
+
overlap_margins: Tuple[int, int] = (4, 4)
|
| 683 |
+
|
| 684 |
+
image_padding_embed: Optional[ImagePaddingEmbed] = None
|
| 685 |
+
|
| 686 |
+
# What layers to get from the image encoder
|
| 687 |
+
vit_layers: Tuple = (-1,)
|
| 688 |
+
|
| 689 |
+
# Controls the image/language connector
|
| 690 |
+
image_pooling_h: int = 2
|
| 691 |
+
|
| 692 |
+
image_pooling_w: int = 2
|
| 693 |
+
|
| 694 |
+
image_pooling_2d: ImagePooling2DType = ImagePooling2DType.attention
|
| 695 |
+
|
| 696 |
+
image_projector: ImageProjectType = ImageProjectType.mlp
|
| 697 |
+
|
| 698 |
+
image_feature_dropout: float = 0.0
|
| 699 |
+
|
| 700 |
+
use_cls_feature: bool = False
|
| 701 |
+
|
| 702 |
+
fix_image_input_idx: int = 2
|
| 703 |
+
|
| 704 |
+
# Makes the model ignore the image
|
| 705 |
+
unconditioned: bool = False
|
| 706 |
+
|
| 707 |
+
# Use in combination with sub-sequence experts to make image/text tokens always
|
| 708 |
+
# occupy particular sub-sequences of the input
|
| 709 |
+
pad_to: Optional[int] = None
|
| 710 |
+
|
| 711 |
+
# LLM Transformer settings
|
| 712 |
+
initializer_range: float = 0.02
|
| 713 |
+
|
| 714 |
+
pad_tokenizer: bool = False
|
| 715 |
+
|
| 716 |
+
normalize_input_embeds: bool = False
|
| 717 |
+
|
| 718 |
+
use_position_ids: bool = True
|
| 719 |
+
"""
|
| 720 |
+
Whether to use position IDs in the model.
|
| 721 |
+
The model operation regarding positional embeddings changes depending on this variable.
|
| 722 |
+
"""
|
| 723 |
+
|
| 724 |
+
query_pre_attn_scalar: int = 224
|
| 725 |
+
"""
|
| 726 |
+
Scalar to apply to the queries before attention.
|
| 727 |
+
Used for Gemma-2.
|
| 728 |
+
"""
|
| 729 |
+
|
| 730 |
+
attn_logit_softcapping: Optional[float] = None
|
| 731 |
+
"""
|
| 732 |
+
Softcap the logits in the attention mechanism.
|
| 733 |
+
Used for Gemma-2.
|
| 734 |
+
"""
|
| 735 |
+
|
| 736 |
+
final_logit_softcapping: Optional[float] = None
|
| 737 |
+
"""
|
| 738 |
+
Softcap the final logits.
|
| 739 |
+
Used for Gemma-2.
|
| 740 |
+
"""
|
| 741 |
+
|
| 742 |
+
head_dim: Optional[int] = None
|
| 743 |
+
"""
|
| 744 |
+
The head dimensionality for the attention mechanism.
|
| 745 |
+
Used for Gemma-2.
|
| 746 |
+
"""
|
| 747 |
+
|
| 748 |
+
tokenizer: TokenizerConfig = field(default_factory=TokenizerConfig)
|
| 749 |
+
"""
|
| 750 |
+
Tokenizer configuration.
|
| 751 |
+
"""
|
| 752 |
+
|
| 753 |
+
loss_token_weighting: Optional[str] = None
|
| 754 |
+
|
| 755 |
+
gin_bindings: Optional[str] = None
|
| 756 |
+
|
| 757 |
+
def get_tokenizer(self):
|
| 758 |
+
tokenizer_cfg = self.tokenizer
|
| 759 |
+
assert tokenizer_cfg.identifier.startswith("mm:")
|
| 760 |
+
kargs = {}
|
| 761 |
+
if tokenizer_cfg.identifier[3:].startswith("olmo-"):
|
| 762 |
+
kargs["olmo_bos_token_id"] = tokenizer_cfg.olmo_bos_token_id
|
| 763 |
+
kargs["olmo_eos_token_id"] = tokenizer_cfg.olmo_eos_token_id
|
| 764 |
+
return build_tokenizer(
|
| 765 |
+
tokenizer_cfg.identifier[3:],
|
| 766 |
+
adds_space=tokenizer_cfg.tokenizer_adds_space,
|
| 767 |
+
tokenizer_dir=tokenizer_cfg.tokenizer_dir,
|
| 768 |
+
pad_tokenizer_to=self.vocab_size if self.pad_tokenizer else None,
|
| 769 |
+
**kargs
|
| 770 |
+
)
|
| 771 |
+
|
| 772 |
+
def get_preprocessor(self):
|
| 773 |
+
vision_cfg = self.vision_backbone
|
| 774 |
+
h, w = self.llm_patches_per_crop()
|
| 775 |
+
|
| 776 |
+
return MultiModalPreprocessor(
|
| 777 |
+
loss_token_weighting=self.loss_token_weighting,
|
| 778 |
+
always_start_with_space=self.always_start_with_space,
|
| 779 |
+
tokenizer=self.get_tokenizer(),
|
| 780 |
+
prompt_override=self.prompt_override,
|
| 781 |
+
fix_image_input_idx=self.fix_image_input_idx,
|
| 782 |
+
prompt_templates=self.prompt_type,
|
| 783 |
+
system_prompt=self.system_prompt_kind,
|
| 784 |
+
default_inference_len=self.default_inference_len,
|
| 785 |
+
message_format=self.message_formatting,
|
| 786 |
+
unconditioned=self.unconditioned,
|
| 787 |
+
crop_mode=self.crop_mode,
|
| 788 |
+
max_crops=self.max_crops,
|
| 789 |
+
do_random_scale=self.do_random_scale,
|
| 790 |
+
base_image_input_size=vision_cfg.image_default_input_size,
|
| 791 |
+
image_patch_size=vision_cfg.image_patch_size,
|
| 792 |
+
image_token_length_h=h,
|
| 793 |
+
image_token_length_w=w,
|
| 794 |
+
use_col_tokens=self.use_col_tokens,
|
| 795 |
+
overlap_margins=self.overlap_margins,
|
| 796 |
+
image_padding_mask=self.image_padding_embed is not None
|
| 797 |
)
|
| 798 |
|
| 799 |
+
def __post_init__(self):
|
| 800 |
+
self.vit_layers = tuple(self.vit_layers) # type: ignore[assignment]
|
| 801 |
+
|
| 802 |
+
@classmethod
|
| 803 |
+
def update_legacy_settings(cls, config: D) -> D:
|
| 804 |
+
"""
|
| 805 |
+
Update the legacy config settings whose schemas have undergone backwards-incompatible changes.
|
| 806 |
+
"""
|
| 807 |
+
if "flash_attention" in config:
|
| 808 |
+
is_flash = config.flash_attention
|
| 809 |
+
del config.flash_attention
|
| 810 |
+
config.attention_type = AttentionType.flash if is_flash else AttentionType.sdpa
|
| 811 |
+
|
| 812 |
+
if "bos_token_id" in config:
|
| 813 |
+
config.tokenizer.olmo_bos_token_id = config.pop("bos_token_id")
|
| 814 |
+
config.tokenizer.olmo_eos_token_id = config.pop("eos_token_id")
|
| 815 |
+
|
| 816 |
+
if "image_padding_mask" in config:
|
| 817 |
+
assert not config["image_padding_mask"]
|
| 818 |
+
del config["image_padding_mask"]
|
| 819 |
+
config["image_padding_embed"] = None
|
| 820 |
+
elif "image_padding_embed" not in config:
|
| 821 |
+
config["image_padding_embed"] = None
|
| 822 |
+
return config
|
| 823 |
+
|
| 824 |
+
@property
|
| 825 |
+
def effective_n_kv_heads(self) -> int:
|
| 826 |
+
if self.n_kv_heads is None:
|
| 827 |
+
if self.multi_query_attention is True:
|
| 828 |
+
return 1
|
| 829 |
+
else:
|
| 830 |
+
return self.n_heads
|
| 831 |
+
else:
|
| 832 |
+
if self.multi_query_attention is None:
|
| 833 |
+
return self.n_kv_heads
|
| 834 |
+
if self.multi_query_attention:
|
| 835 |
+
n_kv_heads_should_be = 1
|
| 836 |
+
else:
|
| 837 |
+
n_kv_heads_should_be = self.n_heads
|
| 838 |
+
if self.n_kv_heads == n_kv_heads_should_be:
|
| 839 |
+
return n_kv_heads_should_be
|
| 840 |
+
else:
|
| 841 |
+
raise OLMoConfigurationError(
|
| 842 |
+
"You can't set `multi_query_attention` and `n_kv_heads` at the same time."
|
| 843 |
+
)
|
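`effective_n_kv_heads` reconciles the deprecated `multi_query_attention` flag with `n_kv_heads`. A small sketch of how it resolves; the values are chosen only for illustration:

# n_kv_heads unset, deprecated flag unset -> plain multi-head attention
assert ModelConfig(n_heads=16).effective_n_kv_heads == 16
# deprecated flag requests multi-query attention
assert ModelConfig(n_heads=16, multi_query_attention=True).effective_n_kv_heads == 1
# grouped-query attention via n_kv_heads; the deprecated flag must be left unset
assert ModelConfig(n_heads=16, n_kv_heads=4).effective_n_kv_heads == 4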
| 844 |
+
|
| 845 |
+
@property
|
| 846 |
+
def image_num_patch(self):
|
| 847 |
+
assert self.vision_backbone is not None
|
| 848 |
+
return self.vision_backbone.image_num_patch
|
| 849 |
+
|
| 850 |
+
@property
|
| 851 |
+
def image_patch_size(self):
|
| 852 |
+
assert self.vision_backbone is not None
|
| 853 |
+
return self.vision_backbone.image_patch_size
|
| 854 |
+
|
| 855 |
+
def llm_patches_per_crop(self):
|
| 856 |
+
h, w = self.image_num_patch
|
| 857 |
+
# Round up in case we need to pad the image features for pooling
|
| 858 |
+
h = (h + self.image_pooling_h - 1) // self.image_pooling_h
|
| 859 |
+
w = (w + self.image_pooling_w - 1) // self.image_pooling_w
|
| 860 |
+
return h, w
|
| 861 |
+
|
| 862 |
+
def get_max_crops(self) -> int:
|
| 863 |
+
"""Max number of crops that can be built for one image"""
|
| 864 |
+
if self.crop_mode == "resize":
|
| 865 |
+
return 1
|
| 866 |
+
elif "resize" in self.crop_mode:
|
| 867 |
+
return 1 + self.max_crops
|
| 868 |
+
else:
|
| 869 |
+
return self.max_crops
|
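With the defaults above (`crop_mode="patchify-v2-and-resize-c2"`, `max_crops=12`), the crop mode contains "resize" without being exactly "resize", so `get_max_crops()` returns 1 + 12 = 13, presumably the resized overview image plus up to twelve crops. A quick check:

cfg = ModelConfig()                                       # crop_mode="patchify-v2-and-resize-c2", max_crops=12
print(cfg.get_max_crops())                                # 13
print(ModelConfig(crop_mode="resize").get_max_crops())    # 1: a single resized image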
| 870 |
+
|
| 871 |
+
|
| 872 |
+
class MolmoConfig(PretrainedConfig):
|
| 873 |
+
model_type = "molmo"
|
| 874 |
+
keys_to_ignore_at_inference = ["past_key_values"] # TODO: confirm
|
| 875 |
+
|
| 876 |
+
def __init__(self, use_cache: bool = False, **kwargs):
|
| 877 |
+
model_config = ModelConfig()
|
| 878 |
+
all_kwargs = model_config.asdict()
|
| 879 |
+
all_kwargs.update(kwargs)
|
| 880 |
+
all_kwargs.update({"use_cache": use_cache})
|
| 881 |
+
all_kwargs.update(
|
| 882 |
+
{"architectures": all_kwargs.get("architectures", ["OLMoForCausalLM"]) or ["OLMoForCausalLM"]}
|
| 883 |
+
)
|
| 884 |
+
super().__init__(**all_kwargs)
|
| 885 |
+
|
| 886 |
+
@property
|
| 887 |
+
def num_attention_heads(self):
|
| 888 |
+
return self.n_heads
|
| 889 |
+
|
| 890 |
+
@property
|
| 891 |
+
def num_hidden_layers(self):
|
| 892 |
+
return self.n_layers
|
| 893 |
+
|
| 894 |
+
@property
|
| 895 |
+
def hidden_size(self):
|
| 896 |
+
return self.d_model
|
| 897 |
+
|
| 898 |
+
@property
|
| 899 |
+
def image_num_patch(self):
|
| 900 |
+
assert self.vision_backbone is not None
|
| 901 |
+
return self.vision_backbone.image_num_patch
|
| 902 |
+
|
| 903 |
+
@property
|
| 904 |
+
def llm_patches_per_crop(self):
|
| 905 |
+
h, w = self.image_num_patch
|
| 906 |
+
# Round up in case we need to pad the image features for pooling
|
| 907 |
+
h = (h + self.image_pooling_h - 1) // self.image_pooling_h
|
| 908 |
+
w = (w + self.image_pooling_w - 1) // self.image_pooling_w
|
| 909 |
+
return h, w
|
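`MolmoConfig` flattens a default `ModelConfig` into the `PretrainedConfig` kwargs, so every field above becomes an attribute on the HF config object, with `num_attention_heads`, `num_hidden_layers` and `hidden_size` mapped onto `n_heads`, `n_layers` and `d_model`. A minimal sketch; the keyword values are illustrative, not taken from the released checkpoint:

cfg = MolmoConfig(use_cache=True, n_heads=16, n_layers=16, d_model=2048)
print(cfg.num_attention_heads)   # 16
print(cfg.num_hidden_layers)     # 16
print(cfg.hidden_size)           # 2048
print(cfg.architectures)         # ["OLMoForCausalLM"] unless overridden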
modeling_molmoe.py
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pytorch_model.bin
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b2030e3d4bff2052c9dbe44d592e2929d451bbbdf098524b65f71893a85c51df
|
| 3 |
+
size 28888362419
|
util.py
ADDED
|
@@ -0,0 +1,785 @@
|
| 1 |
+
import io
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import re
|
| 5 |
+
import socket
|
| 6 |
+
import sys
|
| 7 |
+
import time
|
| 8 |
+
import warnings
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
from enum import Enum
|
| 11 |
+
from itertools import cycle, islice
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
from queue import Queue
|
| 14 |
+
from threading import Thread
|
| 15 |
+
from typing import Any, Callable, Dict, Optional, Tuple, Union
|
| 16 |
+
|
| 17 |
+
import boto3
|
| 18 |
+
import botocore.exceptions as boto_exceptions
|
| 19 |
+
import rich
|
| 20 |
+
from botocore.config import Config
|
| 21 |
+
from cached_path.schemes import SchemeClient, add_scheme_client
|
| 22 |
+
from rich.console import Console, ConsoleRenderable
|
| 23 |
+
from rich.highlighter import NullHighlighter
|
| 24 |
+
from rich.progress import Progress
|
| 25 |
+
from rich.text import Text
|
| 26 |
+
from rich.traceback import Traceback
|
| 27 |
+
|
| 28 |
+
from .aliases import PathOrStr
|
| 29 |
+
from .exceptions import (
|
| 30 |
+
OLMoCliError,
|
| 31 |
+
OLMoEnvironmentError,
|
| 32 |
+
OLMoError,
|
| 33 |
+
OLMoNetworkError,
|
| 34 |
+
OLMoThreadError,
|
| 35 |
+
)
|
| 36 |
+
from .torch_util import get_global_rank, get_local_rank, get_node_rank, is_distributed
|
| 37 |
+
|
| 38 |
+
try:
|
| 39 |
+
from functools import cache
|
| 40 |
+
except ImportError:
|
| 41 |
+
from functools import lru_cache as cache
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class StrEnum(str, Enum):
|
| 45 |
+
"""
|
| 46 |
+
This is equivalent to Python's :class:`enum.StrEnum` since version 3.11.
|
| 47 |
+
We include this here for compatibility with older versions of Python.
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
def __str__(self) -> str:
|
| 51 |
+
return self.value
|
| 52 |
+
|
| 53 |
+
def __repr__(self) -> str:
|
| 54 |
+
return f"'{str(self)}'"
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
_log_extra_fields: Dict[str, Any] = {}
|
| 58 |
+
log = logging.getLogger(__name__)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class LogFilterType(StrEnum):
|
| 62 |
+
rank0_only = "rank0_only"
|
| 63 |
+
local_rank0_only = "local_rank0_only"
|
| 64 |
+
all_ranks = "all_ranks"
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def log_extra_field(field_name: str, field_value: Any) -> None:
|
| 68 |
+
global _log_extra_fields
|
| 69 |
+
if field_value is None:
|
| 70 |
+
if field_name in _log_extra_fields:
|
| 71 |
+
del _log_extra_fields[field_name]
|
| 72 |
+
else:
|
| 73 |
+
_log_extra_fields[field_name] = field_value
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def setup_logging(log_filter_type: LogFilterType = LogFilterType.rank0_only) -> None:
|
| 77 |
+
"""
|
| 78 |
+
:param log_filter_type: determines on which ranks INFO-and-below messages are emitted (by default, rank0 only).
|
| 79 |
+
"""
|
| 80 |
+
log_extra_field("hostname", socket.gethostname())
|
| 81 |
+
if is_distributed():
|
| 82 |
+
log_extra_field("node_rank", get_node_rank())
|
| 83 |
+
log_extra_field("local_rank", get_local_rank())
|
| 84 |
+
log_extra_field("global_rank", get_global_rank())
|
| 85 |
+
else:
|
| 86 |
+
log_extra_field("node_rank", 0)
|
| 87 |
+
log_extra_field("local_rank", 0)
|
| 88 |
+
log_extra_field("global_rank", 0)
|
| 89 |
+
|
| 90 |
+
old_log_record_factory = logging.getLogRecordFactory()
|
| 91 |
+
|
| 92 |
+
def log_record_factory(*args, **kwargs) -> logging.LogRecord:
|
| 93 |
+
record = old_log_record_factory(*args, **kwargs)
|
| 94 |
+
for field_name, field_value in _log_extra_fields.items():
|
| 95 |
+
setattr(record, field_name, field_value)
|
| 96 |
+
return record
|
| 97 |
+
|
| 98 |
+
logging.setLogRecordFactory(log_record_factory)
|
| 99 |
+
|
| 100 |
+
handler: logging.Handler
|
| 101 |
+
if (
|
| 102 |
+
os.environ.get("OLMo_NONINTERACTIVE", False)
|
| 103 |
+
or os.environ.get("DEBIAN_FRONTEND", None) == "noninteractive"
|
| 104 |
+
or not sys.stdout.isatty()
|
| 105 |
+
):
|
| 106 |
+
handler = logging.StreamHandler(sys.stdout)
|
| 107 |
+
formatter = logging.Formatter(
|
| 108 |
+
"%(asctime)s\t%(hostname)s:%(local_rank)s\t%(name)s:%(lineno)s\t%(levelname)s\t%(message)s"
|
| 109 |
+
)
|
| 110 |
+
formatter.default_time_format = "%Y-%m-%d %H:%M:%S"
|
| 111 |
+
formatter.default_msec_format = "%s.%03d"
|
| 112 |
+
handler.setFormatter(formatter)
|
| 113 |
+
else:
|
| 114 |
+
handler = RichHandler()
|
| 115 |
+
|
| 116 |
+
def rank0_filter(record: logging.LogRecord) -> int:
|
| 117 |
+
if record.levelno > logging.INFO:
|
| 118 |
+
return 1
|
| 119 |
+
if getattr(record, "global_rank", 0) == 0:
|
| 120 |
+
return 1
|
| 121 |
+
else:
|
| 122 |
+
return 0
|
| 123 |
+
|
| 124 |
+
def local_rank0_filter(record: logging.LogRecord) -> int:
|
| 125 |
+
if record.levelno > logging.INFO:
|
| 126 |
+
return 1
|
| 127 |
+
if getattr(record, "local_rank", 0) == 0:
|
| 128 |
+
return 1
|
| 129 |
+
else:
|
| 130 |
+
return 0
|
| 131 |
+
|
| 132 |
+
if log_filter_type == LogFilterType.rank0_only:
|
| 133 |
+
filter = rank0_filter
|
| 134 |
+
elif log_filter_type == LogFilterType.local_rank0_only:
|
| 135 |
+
filter = local_rank0_filter # type: ignore
|
| 136 |
+
elif log_filter_type == LogFilterType.all_ranks:
|
| 137 |
+
filter = None
|
| 138 |
+
else:
|
| 139 |
+
raise ValueError(log_filter_type)
|
| 140 |
+
|
| 141 |
+
if filter is not None:
|
| 142 |
+
handler.addFilter(filter) # type: ignore
|
| 143 |
+
logging.basicConfig(handlers=[handler], level=logging.INFO)
|
| 144 |
+
|
| 145 |
+
logging.captureWarnings(True)
|
| 146 |
+
logging.getLogger("urllib3").setLevel(logging.ERROR)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def excepthook(exctype, value, traceback):
|
| 150 |
+
"""
|
| 151 |
+
Used to patch `sys.excepthook` in order to log exceptions.
|
| 152 |
+
"""
|
| 153 |
+
if issubclass(exctype, KeyboardInterrupt):
|
| 154 |
+
sys.__excepthook__(exctype, value, traceback)
|
| 155 |
+
elif issubclass(exctype, OLMoCliError):
|
| 156 |
+
rich.get_console().print(f"[yellow]{value}[/]", highlight=False)
|
| 157 |
+
elif issubclass(exctype, OLMoError):
|
| 158 |
+
rich.get_console().print(Text(f"{exctype.__name__}:", style="red"), value, highlight=False)
|
| 159 |
+
else:
|
| 160 |
+
log.critical("Uncaught %s: %s", exctype.__name__, value, exc_info=(exctype, value, traceback))
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def install_excepthook():
|
| 164 |
+
sys.excepthook = excepthook
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def filter_warnings():
|
| 168 |
+
# Filter internal deprecation warnings from torch
|
| 169 |
+
warnings.filterwarnings(
|
| 170 |
+
action="ignore",
|
| 171 |
+
category=UserWarning,
|
| 172 |
+
message="torch.distributed.*_base is a private function and will be deprecated.*",
|
| 173 |
+
)
|
| 174 |
+
warnings.filterwarnings(
|
| 175 |
+
action="ignore",
|
| 176 |
+
category=UserWarning,
|
| 177 |
+
message="TypedStorage is deprecated.*",
|
| 178 |
+
)
|
| 179 |
+
warnings.filterwarnings(
|
| 180 |
+
action="ignore",
|
| 181 |
+
category=UserWarning,
|
| 182 |
+
message="Please use DTensor instead.*",
|
| 183 |
+
)
|
| 184 |
+
# Torchvision warnings. We don't actually use torchvision.
|
| 185 |
+
warnings.filterwarnings(
|
| 186 |
+
action="ignore",
|
| 187 |
+
message="failed to load.*",
|
| 188 |
+
module="torchvision.io.image",
|
| 189 |
+
)
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def set_env_variables():
|
| 193 |
+
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def prepare_cli_environment(log_filter_type: Optional[LogFilterType] = None):
|
| 197 |
+
if log_filter_type is None:
|
| 198 |
+
log_filter_type = LogFilterType(os.environ.get("LOG_FILTER_TYPE", "rank0_only"))
|
| 199 |
+
rich.reconfigure(width=max(rich.get_console().width, 180), soft_wrap=True)
|
| 200 |
+
setup_logging(log_filter_type=log_filter_type)
|
| 201 |
+
install_excepthook()
|
| 202 |
+
filter_warnings()
|
| 203 |
+
set_env_variables()
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def clean_opt(arg: str) -> str:
|
| 207 |
+
if "=" not in arg:
|
| 208 |
+
arg = f"{arg}=True"
|
| 209 |
+
name, val = arg.split("=", 1)
|
| 210 |
+
name = name.strip("-").replace("-", "_")
|
| 211 |
+
return f"{name}={val}"
|
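`clean_opt` normalizes CLI-style dotlist arguments before they are handed to OmegaConf: bare flags become `=True`, surrounding dashes are stripped, and remaining dashes in the option name become underscores. For example:

print(clean_opt("--save-overwrite"))          # "save_overwrite=True"
print(clean_opt("model.n_layers=16"))         # unchanged: "model.n_layers=16"
print(clean_opt("--data.paths=/tmp/a.npy"))   # "data.paths=/tmp/a.npy"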
| 212 |
+
|
| 213 |
+
|
| 214 |
+
class RichHandler(logging.Handler):
|
| 215 |
+
"""
|
| 216 |
+
A simplified version of rich.logging.RichHandler from
|
| 217 |
+
https://github.com/Textualize/rich/blob/master/rich/logging.py
|
| 218 |
+
"""
|
| 219 |
+
|
| 220 |
+
def __init__(
|
| 221 |
+
self,
|
| 222 |
+
*,
|
| 223 |
+
level: Union[int, str] = logging.NOTSET,
|
| 224 |
+
console: Optional[Console] = None,
|
| 225 |
+
markup: bool = False,
|
| 226 |
+
) -> None:
|
| 227 |
+
super().__init__(level=level)
|
| 228 |
+
self.console = console or rich.get_console()
|
| 229 |
+
self.highlighter = NullHighlighter()
|
| 230 |
+
self.markup = markup
|
| 231 |
+
|
| 232 |
+
def emit(self, record: logging.LogRecord) -> None:
|
| 233 |
+
try:
|
| 234 |
+
if hasattr(record.msg, "__rich__") or hasattr(record.msg, "__rich_console__"):
|
| 235 |
+
self.console.print(record.msg)
|
| 236 |
+
else:
|
| 237 |
+
msg: Any = record.msg
|
| 238 |
+
if isinstance(record.msg, str):
|
| 239 |
+
msg = self.render_message(record=record, message=record.getMessage())
|
| 240 |
+
renderables = [
|
| 241 |
+
self.get_time_text(record),
|
| 242 |
+
self.get_level_text(record),
|
| 243 |
+
self.get_location_text(record),
|
| 244 |
+
msg,
|
| 245 |
+
]
|
| 246 |
+
if record.exc_info is not None:
|
| 247 |
+
tb = Traceback.from_exception(*record.exc_info) # type: ignore
|
| 248 |
+
renderables.append(tb)
|
| 249 |
+
self.console.print(*renderables)
|
| 250 |
+
except Exception:
|
| 251 |
+
self.handleError(record)
|
| 252 |
+
|
| 253 |
+
def render_message(self, *, record: logging.LogRecord, message: str) -> ConsoleRenderable:
|
| 254 |
+
use_markup = getattr(record, "markup", self.markup)
|
| 255 |
+
message_text = Text.from_markup(message) if use_markup else Text(message)
|
| 256 |
+
|
| 257 |
+
highlighter = getattr(record, "highlighter", self.highlighter)
|
| 258 |
+
if highlighter:
|
| 259 |
+
message_text = highlighter(message_text)
|
| 260 |
+
|
| 261 |
+
return message_text
|
| 262 |
+
|
| 263 |
+
def get_time_text(self, record: logging.LogRecord) -> Text:
|
| 264 |
+
log_time = datetime.fromtimestamp(record.created)
|
| 265 |
+
time_str = log_time.strftime("[%Y-%m-%d %X]")
|
| 266 |
+
return Text(time_str, style="log.time", end=" ")
|
| 267 |
+
|
| 268 |
+
def get_level_text(self, record: logging.LogRecord) -> Text:
|
| 269 |
+
level_name = record.levelname
|
| 270 |
+
level_text = Text.styled(level_name.ljust(8), f"logging.level.{level_name.lower()}")
|
| 271 |
+
level_text.style = "log.level"
|
| 272 |
+
level_text.end = " "
|
| 273 |
+
return level_text
|
| 274 |
+
|
| 275 |
+
def get_location_text(self, record: logging.LogRecord) -> Text:
|
| 276 |
+
name_and_line = f"{record.name}:{record.lineno}" if record.name != "root" else "root"
|
| 277 |
+
text = f"[{name_and_line}, rank={record.local_rank}]" # type: ignore
|
| 278 |
+
return Text(text, style="log.path")
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def wait_for(condition: Callable[[], bool], description: str, timeout: float = 10.0):
|
| 282 |
+
"""Wait for the condition function to return True."""
|
| 283 |
+
start_time = time.monotonic()
|
| 284 |
+
while not condition():
|
| 285 |
+
time.sleep(0.5)
|
| 286 |
+
if time.monotonic() - start_time > timeout:
|
| 287 |
+
raise TimeoutError(f"{description} timed out")
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def is_url(path: PathOrStr) -> bool:
|
| 291 |
+
return re.match(r"[a-z0-9]+://.*", str(path)) is not None
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def dir_is_empty(dir: PathOrStr) -> bool:
|
| 295 |
+
dir = Path(dir)
|
| 296 |
+
if not dir.is_dir():
|
| 297 |
+
return True
|
| 298 |
+
try:
|
| 299 |
+
next(dir.glob("*"))
|
| 300 |
+
return False
|
| 301 |
+
except StopIteration:
|
| 302 |
+
return True
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def get_progress_bar() -> Progress:
|
| 306 |
+
from cached_path import get_download_progress
|
| 307 |
+
|
| 308 |
+
return get_download_progress()
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def resource_path(
|
| 312 |
+
folder: PathOrStr, fname: str, local_cache: Optional[PathOrStr] = None, progress: Optional[Progress] = None
|
| 313 |
+
) -> Path:
|
| 314 |
+
if local_cache is not None and (local_path := Path(local_cache) / fname).is_file():
|
| 315 |
+
log.info(f"Found local cache of {fname} at {local_path}")
|
| 316 |
+
return local_path
|
| 317 |
+
else:
|
| 318 |
+
from cached_path import cached_path
|
| 319 |
+
|
| 320 |
+
return cached_path(f"{str(folder).rstrip('/')}/{fname}", progress=progress)
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def file_size(path: PathOrStr) -> int:
    """
    Get the size of a local or remote file in bytes.
    """
    if is_url(path):
        from urllib.parse import urlparse

        parsed = urlparse(str(path))
        if parsed.scheme == "gs":
            return _gcs_file_size(parsed.netloc, parsed.path.strip("/"))
        elif parsed.scheme in ("s3", "r2", "weka"):
            return _s3_file_size(parsed.scheme, parsed.netloc, parsed.path.strip("/"))
        elif parsed.scheme in ("http", "https"):
            return _http_file_size(parsed.scheme, parsed.netloc, parsed.path.strip("/"))
        elif parsed.scheme == "file":
            return file_size(str(path).replace("file://", "", 1))
        else:
            raise NotImplementedError(f"file size not implemented for '{parsed.scheme}' files")
    else:
        return os.stat(path).st_size

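# Illustrative dispatch for `file_size` (bucket names are hypothetical); the URL
# scheme picks the backend helper:
#
#     file_size("gs://bucket/model.pt")  -> _gcs_file_size
#     file_size("s3://bucket/model.pt")  -> _s3_file_size
#     file_size("/local/model.pt")       -> os.stat(...).st_size
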
def upload(source: PathOrStr, target: str, save_overwrite: bool = False):
    """Upload source file to a target location on GCS or S3."""
    from urllib.parse import urlparse

    source = Path(source)
    assert source.is_file()
    parsed = urlparse(target)
    if parsed.scheme == "gs":
        _gcs_upload(source, parsed.netloc, parsed.path.strip("/"), save_overwrite=save_overwrite)
    elif parsed.scheme in ("s3", "r2", "weka"):
        _s3_upload(source, parsed.scheme, parsed.netloc, parsed.path.strip("/"), save_overwrite=save_overwrite)
    else:
        raise NotImplementedError(f"Upload not implemented for '{parsed.scheme}' scheme")

def get_bytes_range(source: PathOrStr, bytes_start: int, num_bytes: int) -> bytes:
    if is_url(source):
        from urllib.parse import urlparse

        parsed = urlparse(str(source))
        if parsed.scheme == "gs":
            return _gcs_get_bytes_range(parsed.netloc, parsed.path.strip("/"), bytes_start, num_bytes)
        elif parsed.scheme in ("s3", "r2", "weka"):
            return _s3_get_bytes_range(
                parsed.scheme, parsed.netloc, parsed.path.strip("/"), bytes_start, num_bytes
            )
        elif parsed.scheme in ("http", "https"):
            return _http_get_bytes_range(
                parsed.scheme, parsed.netloc, parsed.path.strip("/"), bytes_start, num_bytes
            )
        elif parsed.scheme == "file":
            return get_bytes_range(str(source).replace("file://", "", 1), bytes_start, num_bytes)
        else:
            raise NotImplementedError(f"get bytes range not implemented for '{parsed.scheme}' files")
    else:
        with open(source, "rb") as f:
            f.seek(bytes_start)
            return f.read(num_bytes)

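# Illustrative ranged read via `get_bytes_range` (path is hypothetical): fetch bytes
# [1024, 1024 + 512) of a remote object without downloading the whole file.
#
#     header = get_bytes_range("s3://bucket/data.npy", bytes_start=1024, num_bytes=512)
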
def find_latest_checkpoint(dir: PathOrStr) -> Optional[PathOrStr]:
    if is_url(dir):
        from urllib.parse import urlparse

        parsed = urlparse(str(dir))
        if parsed.scheme == "gs":
            raise NotImplementedError
        elif parsed.scheme in ("s3", "r2", "weka"):
            return _s3_find_latest_checkpoint(parsed.scheme, parsed.netloc, parsed.path.strip("/"))
        elif parsed.scheme == "file":
            return find_latest_checkpoint(str(dir).replace("file://", "", 1))
        else:
            raise NotImplementedError(f"find_latest_checkpoint not implemented for '{parsed.scheme}' files")
    else:
        latest_step = 0
        latest_checkpoint: Optional[Path] = None
        for path in Path(dir).glob("step*"):
            if path.is_dir():
                try:
                    step = int(path.name.replace("step", "").replace("-unsharded", ""))
                except ValueError:
                    continue
                # We prioritize sharded checkpoints over unsharded checkpoints.
                if step > latest_step or (step == latest_step and not path.name.endswith("-unsharded")):
                    latest_step = step
                    latest_checkpoint = path
        return latest_checkpoint

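# Example directory layout `find_latest_checkpoint` would scan (names are illustrative):
#
#     run/step1000/    run/step2000/    run/step2000-unsharded/
#
# find_latest_checkpoint("run") returns run/step2000: the highest step wins, and a
# sharded checkpoint is preferred over its "-unsharded" twin at the same step.
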
def _gcs_upload(source: Path, bucket_name: str, key: str, save_overwrite: bool = False):
    from google.cloud import storage as gcs

    storage_client = gcs.Client()
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(key)
    if not save_overwrite and blob.exists():
        raise FileExistsError(f"gs://{bucket_name}/{key} already exists. Use save_overwrite to overwrite it.")
    blob.upload_from_filename(source)


def _gcs_file_size(bucket_name: str, key: str) -> int:
    from google.api_core.exceptions import NotFound
    from google.cloud import storage as gcs

    storage_client = gcs.Client()
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(key)
    try:
        blob.reload()
    except NotFound:
        raise FileNotFoundError(f"gs://{bucket_name}/{key}")
    assert blob.size is not None
    return blob.size


def _gcs_get_bytes_range(bucket_name: str, key: str, bytes_start: int, num_bytes: int) -> bytes:
    from google.api_core.exceptions import NotFound
    from google.cloud import storage as gcs

    storage_client = gcs.Client()
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(key)
    try:
        blob.reload()
    except NotFound:
        raise FileNotFoundError(f"gs://{bucket_name}/{key}")
    return blob.download_as_bytes(start=bytes_start, end=bytes_start + num_bytes - 1)

def _get_s3_profile_name(scheme: str) -> Optional[str]:
    if scheme == "s3":
        # For backwards compatibility, we assume S3 uses the default profile if S3_PROFILE is not set.
        return os.environ.get("S3_PROFILE")
    if scheme == "r2":
        profile_name = os.environ.get("R2_PROFILE")
        if profile_name is None:
            raise OLMoEnvironmentError(
                "R2 profile name is not set. Did you forget to set the 'R2_PROFILE' env var?"
            )

        return profile_name
    if scheme == "weka":
        profile_name = os.environ.get("WEKA_PROFILE")
        if profile_name is None:
            raise OLMoEnvironmentError(
                "Weka profile name is not set. Did you forget to set the 'WEKA_PROFILE' env var?"
            )

        return profile_name

    raise NotImplementedError(f"Cannot get profile name for scheme {scheme}")


def _get_s3_endpoint_url(scheme: str) -> Optional[str]:
    if scheme == "s3":
        return None
    if scheme == "r2":
        r2_endpoint_url = os.environ.get("R2_ENDPOINT_URL")
        if r2_endpoint_url is None:
            raise OLMoEnvironmentError(
                "R2 endpoint url is not set. Did you forget to set the 'R2_ENDPOINT_URL' env var?"
            )

        return r2_endpoint_url
    if scheme == "weka":
        weka_endpoint_url = os.environ.get("WEKA_ENDPOINT_URL")
        if weka_endpoint_url is None:
            raise OLMoEnvironmentError(
                "Weka endpoint url is not set. Did you forget to set the 'WEKA_ENDPOINT_URL' env var?"
            )

        return weka_endpoint_url

    raise NotImplementedError(f"Cannot get endpoint url for scheme {scheme}")


@cache
def _get_s3_client(scheme: str):
    session = boto3.Session(profile_name=_get_s3_profile_name(scheme))
    return session.client(
        "s3",
        endpoint_url=_get_s3_endpoint_url(scheme),
        config=Config(retries={"max_attempts": 10, "mode": "standard"}),
        use_ssl=not int(os.environ.get("OLMO_NO_SSL", "0")),
    )

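# Environment variables consumed above: S3_PROFILE (optional, for s3://), R2_PROFILE
# and R2_ENDPOINT_URL (required for r2://), WEKA_PROFILE and WEKA_ENDPOINT_URL
# (required for weka://), and OLMO_NO_SSL=1 to disable SSL. The boto3 client is
# memoized per scheme via @cache, so it is only constructed once per scheme.
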
def _wait_before_retry(attempt: int):
    time.sleep(min(0.5 * 2**attempt, 3.0))

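# Backoff schedule implied by `_wait_before_retry` (informational): attempt 1 sleeps
# 1.0s, attempt 2 sleeps 2.0s, and attempt 3 onwards is capped at 3.0s.
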
def _s3_upload(
    source: Path, scheme: str, bucket_name: str, key: str, save_overwrite: bool = False, max_attempts: int = 3
):
    err: Optional[Exception] = None
    if not save_overwrite:
        for attempt in range(1, max_attempts + 1):
            try:
                _get_s3_client(scheme).head_object(Bucket=bucket_name, Key=key)
                raise FileExistsError(
                    f"s3://{bucket_name}/{key} already exists. Use save_overwrite to overwrite it."
                )
            except boto_exceptions.ClientError as e:
                if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
                    err = None
                    break
                err = e

            if attempt < max_attempts:
                log.warning("%s failed attempt %d with retriable error: %s", _s3_upload.__name__, attempt, err)
                _wait_before_retry(attempt)

        if err is not None:
            raise OLMoNetworkError(f"Failed to check object existence during {scheme} upload") from err

    try:
        _get_s3_client(scheme).upload_file(source, bucket_name, key)
    except boto_exceptions.ClientError as e:
        raise OLMoNetworkError(f"Failed to upload to {scheme}") from e

def _s3_file_size(scheme: str, bucket_name: str, key: str, max_attempts: int = 3) -> int:
    err: Optional[Exception] = None
    for attempt in range(1, max_attempts + 1):
        try:
            return _get_s3_client(scheme).head_object(Bucket=bucket_name, Key=key)["ContentLength"]
        except boto_exceptions.ClientError as e:
            if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
                raise FileNotFoundError(f"s3://{bucket_name}/{key}") from e
            err = e

        if attempt < max_attempts:
            log.warning("%s failed attempt %d with retriable error: %s", _s3_file_size.__name__, attempt, err)
            _wait_before_retry(attempt)

    raise OLMoNetworkError(f"Failed to get {scheme} file size") from err

def _s3_get_bytes_range(
    scheme: str, bucket_name: str, key: str, bytes_start: int, num_bytes: int, max_attempts: int = 3
) -> bytes:
    err: Optional[Exception] = None
    for attempt in range(1, max_attempts + 1):
        try:
            return (
                _get_s3_client(scheme)
                .get_object(
                    Bucket=bucket_name, Key=key, Range=f"bytes={bytes_start}-{bytes_start + num_bytes - 1}"
                )["Body"]
                .read()
            )
        except boto_exceptions.ClientError as e:
            if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
                raise FileNotFoundError(f"{scheme}://{bucket_name}/{key}") from e
            err = e
        except (boto_exceptions.HTTPClientError, boto_exceptions.ConnectionError) as e:
            # ResponseStreamingError (subclass of HTTPClientError) can happen as
            # a result of a failed read from the stream (http.client.IncompleteRead).
            # Retrying can help in this case.
            err = e

        if attempt < max_attempts:
            log.warning(
                "%s failed attempt %d with retriable error: %s", _s3_get_bytes_range.__name__, attempt, err
            )
            _wait_before_retry(attempt)

    # When torch's DataLoader intercepts exceptions, it may try to re-raise them
    # by recalling their constructor with a single message arg. Torch has some
    # logic to deal with the absence of a single-parameter constructor, but it
    # doesn't gracefully handle other possible failures in calling such a constructor.
    # This can cause an irrelevant exception (e.g. KeyError: 'error'), resulting
    # in us losing the true exception info. To avoid this, we change the exception
    # to a type that has a single-parameter constructor.
    raise OLMoNetworkError(f"Failed to get bytes range from {scheme}") from err

def _s3_find_latest_checkpoint(scheme: str, bucket_name: str, prefix: str) -> Optional[str]:
    if not prefix.endswith("/"):
        prefix = f"{prefix}/"
    response = _get_s3_client(scheme).list_objects(Bucket=bucket_name, Prefix=prefix, Delimiter="/")
    assert not response["IsTruncated"]  # need to handle this if it happens
    latest_step = 0
    latest_checkpoint: Optional[str] = None
    for item in response["CommonPrefixes"]:
        prefix = item["Prefix"].strip("/")
        checkpoint_name = os.path.split(prefix)[-1]
        if not checkpoint_name.startswith("step"):
            continue
        try:
            step = int(checkpoint_name.replace("step", "").replace("-unsharded", ""))
        except ValueError:
            continue
        # Make sure the checkpoint dir contains a config, otherwise the checkpoint is incomplete
        # (upload might have failed part way through).
        try:
            _s3_file_size(scheme, bucket_name, f"{prefix}/config.yaml")
        except FileNotFoundError:
            continue
        # We prioritize sharded checkpoints over unsharded ones.
        if step > latest_step or (step == latest_step and not checkpoint_name.endswith("-unsharded")):
            latest_step = step
            latest_checkpoint = f"{scheme}://{bucket_name}/{prefix}"
    return latest_checkpoint

def _http_file_size(scheme: str, host_name: str, path: str) -> int:
    import requests

    response = requests.head(f"{scheme}://{host_name}/{path}", allow_redirects=True)
    return int(response.headers.get("content-length"))


def _http_get_bytes_range(scheme: str, host_name: str, path: str, bytes_start: int, num_bytes: int) -> bytes:
    import requests

    response = requests.get(
        f"{scheme}://{host_name}/{path}", headers={"Range": f"bytes={bytes_start}-{bytes_start+num_bytes-1}"}
    )
    result = response.content
    # Some web servers silently ignore range requests and send everything.
    assert (
        len(result) == num_bytes
    ), f"expected {num_bytes} bytes, got {len(result)}"
    return result

def default_thread_count() -> int:
    return int(os.environ.get("OLMO_NUM_THREADS") or min(32, (os.cpu_count() or 1) + 4))


def pass_through_fn(fn, *args, **kwargs):
    return fn(*args, **kwargs)

def threaded_generator(g, maxsize: int = 16, thread_name: Optional[str] = None):
    q: Queue = Queue(maxsize=maxsize)

    sentinel = object()

    def fill_queue():
        try:
            for value in g:
                q.put(value)
        except Exception as e:
            q.put(e)
        finally:
            q.put(sentinel)

    thread_name = thread_name or repr(g)
    thread = Thread(name=thread_name, target=fill_queue, daemon=True)
    thread.start()

    for x in iter(q.get, sentinel):
        if isinstance(x, Exception):
            raise OLMoThreadError(f"generator thread {thread_name} failed") from x
        else:
            yield x

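# Illustrative use of `threaded_generator` (the producer below is hypothetical): items
# are produced on a daemon thread and buffered in a bounded queue (at most `maxsize`),
# so the consumer's work overlaps with the producer's I/O.
#
#     for batch in threaded_generator(read_batches("data.npy"), maxsize=16):
#         process(batch)
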
def split_dict_of_list(batch, split_size):
    out = None
    for key, val in batch.items():
        parts = split_list(val, split_size)
        if out is None:
            out = [{key: part} for part in parts]
        else:
            assert len(out) == len(parts)
            for out_dict, part in zip(out, parts):
                out_dict[key] = part
    return out


def split_list(lst, split_size):
    assert len(lst) % split_size == 0
    n = len(lst) // split_size
    return [lst[i * split_size : (i + 1) * split_size] for i in range(n)]


def flatten_list(lst):
    return [x for xs in lst for x in xs]

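# Worked examples for the helpers above (values are illustrative):
#
#     split_list([1, 2, 3, 4, 5, 6], split_size=2)
#         -> [[1, 2], [3, 4], [5, 6]]
#     split_dict_of_list({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}, split_size=2)
#         -> [{"a": [1, 2], "b": [5, 6]}, {"a": [3, 4], "b": [7, 8]}]
#     flatten_list([[1, 2], [3, 4]])
#         -> [1, 2, 3, 4]
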
def roundrobin(*iterables):
    """
    Call the given iterables in a round-robin fashion. For example:
    ``roundrobin('ABC', 'D', 'EF') --> A D E B F C``
    """
    # Adapted from https://docs.python.org/3/library/itertools.html#itertools-recipes
    num_active = len(iterables)
    nexts = cycle(iter(it).__next__ for it in iterables)
    while num_active:
        try:
            for next in nexts:
                yield next()
        except StopIteration:
            # Remove the iterator we just exhausted from the cycle.
            num_active -= 1
            nexts = cycle(islice(nexts, num_active))


def add_cached_path_clients():
    add_scheme_client(WekaClient)

class WekaClient(SchemeClient):
    recoverable_errors = SchemeClient.recoverable_errors + (
        boto_exceptions.HTTPClientError,
        boto_exceptions.ConnectionError,
    )

    scheme = "weka"

    def __init__(self, resource: str) -> None:
        SchemeClient.__init__(self, resource)
        self.bucket_name, self.path = WekaClient._split_cloud_path(resource, "weka")
        self.s3 = _get_s3_client("weka")
        self.object_info = None

    @staticmethod
    def _split_cloud_path(url: str, provider: str) -> Tuple[str, str]:
        """Split a full s3 path into the bucket name and path."""
        from urllib.parse import urlparse

        parsed = urlparse(url)
        if not parsed.netloc or not parsed.path:
            raise ValueError("bad {} path {}".format(provider, url))
        bucket_name = parsed.netloc
        provider_path = parsed.path
        # Remove '/' at beginning of path.
        if provider_path.startswith("/"):
            provider_path = provider_path[1:]
        return bucket_name, provider_path

    def _ensure_object_info(self):
        if self.object_info is None:
            try:
                self.object_info = self.s3.head_object(Bucket=self.bucket_name, Key=self.path)
            except boto_exceptions.ClientError as e:
                if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
                    raise FileNotFoundError(f"weka://{self.bucket_name}/{self.path}") from e
                raise e

    def get_etag(self) -> Optional[str]:
        self._ensure_object_info()
        assert self.object_info is not None
        return self.object_info.get("ETag")

    def get_size(self) -> Optional[int]:
        self._ensure_object_info()
        assert self.object_info is not None
        return self.object_info.get("ContentLength")

    def get_resource(self, temp_file: io.BufferedWriter) -> None:
        self.s3.download_fileobj(Fileobj=temp_file, Bucket=self.bucket_name, Key=self.path)

    def get_bytes_range(self, index: int, length: int) -> bytes:
        response = self.s3.get_object(
            Bucket=self.bucket_name, Key=self.path, Range=f"bytes={index}-{index+length-1}"
        )
        return response["Body"].read()