vram_state = VRAMState.NO_VRAM
set_vram_to = VRAMState.NO_VRAM

#102
MagicQuill/comfy/model_management.py CHANGED
@@ -20,8 +20,8 @@ class CPUState(Enum):
20
  MPS = 2
21
 
22
  # Determine VRAM State
23
- vram_state = VRAMState.NORMAL_VRAM
24
- set_vram_to = VRAMState.NORMAL_VRAM
25
  cpu_state = CPUState.GPU
26
 
27
  total_vram = 0
@@ -453,7 +453,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False):
453
  if vram_set_state == VRAMState.NO_VRAM:
454
  lowvram_model_memory = 64 * 1024 * 1024
455
 
456
- cur_loaded_model = loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)
457
  current_loaded_models.insert(0, loaded_model)
458
  return
459
 
 
20
  MPS = 2
21
 
22
  # Determine VRAM State
23
+ vram_state = VRAMState.NO_VRAM
24
+ set_vram_to = VRAMState.NO_VRAM
25
  cpu_state = CPUState.GPU
26
 
27
  total_vram = 0
 
453
  if vram_set_state == VRAMState.NO_VRAM:
454
  lowvram_model_memory = 64 * 1024 * 1024
455
 
456
+ cur_loaded_model = loaded_model.model_load(64 * 1024 * 1024, force_patch_weights=force_patch_weights)
457
  current_loaded_models.insert(0, loaded_model)
458
  return
459