Qubitium committed on
Commit
5b77379
·
verified ·
1 Parent(s): f6d57a0

Add files using upload-large-folder tool

Browse files
Files changed (1) hide show
  1. configuration_minimax_m2.py +2 -18
configuration_minimax_m2.py CHANGED
@@ -12,19 +12,6 @@ from typing import List, Optional, Union
12
  from transformers.configuration_utils import PretrainedConfig
13
 
14
 
15
- class _QuantizationConfigDict(dict):
16
- """Ensure quantization config always exposes a `quant_method`."""
17
-
18
- def __init__(self, data: Optional[dict] = None):
19
- if data is None:
20
- data = {}
21
- super().__init__(data)
22
- self.setdefault("quant_method", "none")
23
-
24
- def to_dict(self):
25
- return dict(self)
26
-
27
-
28
  class MiniMaxM2Config(PretrainedConfig):
29
  model_type = "minimax"
30
 
@@ -80,10 +67,6 @@ class MiniMaxM2Config(PretrainedConfig):
80
  **kwargs,
81
  ) -> None:
82
  quantization_config = kwargs.pop("quantization_config", None)
83
- if quantization_config is None:
84
- quantization_config = _QuantizationConfigDict()
85
- elif not isinstance(quantization_config, _QuantizationConfigDict):
86
- quantization_config = _QuantizationConfigDict(quantization_config)
87
  transformers_version = kwargs.pop("transformers_version", None)
88
 
89
  super().__init__(
@@ -140,7 +123,8 @@ class MiniMaxM2Config(PretrainedConfig):
140
 
141
  # Convenient accessor used by rotary embedding helper
142
  self.partial_rotary_factor = float(self.rotary_dim) / float(self.head_dim)
143
- self.quantization_config = quantization_config
 
144
  self.transformers_version = transformers_version
145
 
146
 
 
12
  from transformers.configuration_utils import PretrainedConfig
13
 
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  class MiniMaxM2Config(PretrainedConfig):
16
  model_type = "minimax"
17
 
 
67
  **kwargs,
68
  ) -> None:
69
  quantization_config = kwargs.pop("quantization_config", None)
 
 
 
 
70
  transformers_version = kwargs.pop("transformers_version", None)
71
 
72
  super().__init__(
 
123
 
124
  # Convenient accessor used by rotary embedding helper
125
  self.partial_rotary_factor = float(self.rotary_dim) / float(self.head_dim)
126
+ if quantization_config is not None:
127
+ self.quantization_config = quantization_config
128
  self.transformers_version = transformers_version
129
 
130