Commit b8ffb29

Memory tweaks.

Parent: ce37c11

1 file changed: comfy/model_management.py (+2 −2 lines)
@@ -438,11 +438,11 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
     global vram_state
 
     inference_memory = minimum_inference_memory()
-    extra_mem = max(inference_memory, memory_required) + 100 * 1024 * 1024
+    extra_mem = max(inference_memory, memory_required + 300 * 1024 * 1024)
     if minimum_memory_required is None:
         minimum_memory_required = extra_mem
     else:
-        minimum_memory_required = max(inference_memory, minimum_memory_required) + 100 * 1024 * 1024
+        minimum_memory_required = max(inference_memory, minimum_memory_required + 300 * 1024 * 1024)
 
     models = set(models)
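For context, the commit changes how the memory headroom is computed before models are loaded onto the GPU: the old code padded the larger of the two estimates by a flat 100 MiB, while the new code pads only the caller-supplied memory_required by 300 MiB before taking the max, so no extra padding is added when inference_memory already dominates. The following minimal sketch (not ComfyUI code; the helper names extra_mem_old/extra_mem_new and the example numbers are made up for illustration) compares the two formulas:

# Sketch comparing the headroom formulas before and after this commit.
MiB = 1024 * 1024

def extra_mem_old(inference_memory, memory_required):
    # Before: always add a flat 100 MiB on top of the larger estimate.
    return max(inference_memory, memory_required) + 100 * MiB

def extra_mem_new(inference_memory, memory_required):
    # After: pad only memory_required by 300 MiB, then take the max,
    # so inference_memory alone is used when it already dominates.
    return max(inference_memory, memory_required + 300 * MiB)

if __name__ == "__main__":
    inference_memory = 1024 * MiB  # stand-in for minimum_inference_memory()
    for memory_required in (0, 2048 * MiB):
        print(memory_required // MiB,
              extra_mem_old(inference_memory, memory_required) // MiB,
              extra_mem_new(inference_memory, memory_required) // MiB)

With these example values, a small memory_required (0) yields 1124 MiB under the old formula but 1024 MiB under the new one, while a large memory_required (2048 MiB) yields 2148 MiB before and 2348 MiB after, i.e. larger requests now get a bigger safety margin.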