-
Notifications
You must be signed in to change notification settings - Fork 2.8k
Description
Is there an existing issue for this problem?
- I have searched the existing issues
Install method
Invoke's Launcher
Operating system
Linux
GPU vendor
Nvidia (CUDA)
GPU model
RTX 3060
GPU VRAM
12GB
Version number
v6.11.0 rc1
Browser
No response
System Information
{
"version": "6.11.0.rc1",
"dependencies": {
"absl-py" : "2.3.1",
"accelerate" : "1.12.0",
"annotated-types" : "0.7.0",
"anyio" : "4.12.1",
"attrs" : "25.4.0",
"bidict" : "0.23.1",
"bitsandbytes" : "0.49.1",
"blake3" : "1.0.8",
"certifi" : "2022.12.7",
"cffi" : "2.0.0",
"charset-normalizer" : "2.1.1",
"click" : "8.3.1",
"coloredlogs" : "15.0.1",
"compel" : "2.1.1",
"contourpy" : "1.3.3",
"CUDA" : "12.8",
"cycler" : "0.12.1",
"Deprecated" : "1.3.1",
"diffusers" : "0.36.0",
"dnspython" : "2.8.0",
"dynamicprompts" : "0.31.0",
"einops" : "0.8.2",
"fastapi" : "0.118.3",
"fastapi-events" : "0.12.2",
"filelock" : "3.20.0",
"flatbuffers" : "25.12.19",
"fonttools" : "4.61.1",
"fsspec" : "2025.12.0",
"gguf" : "0.17.1",
"h11" : "0.16.0",
"hf-xet" : "1.2.0",
"httpcore" : "1.0.9",
"httptools" : "0.7.1",
"httpx" : "0.28.1",
"huggingface-hub" : "0.36.0",
"humanfriendly" : "10.0",
"idna" : "3.4",
"importlib_metadata" : "7.1.0",
"invisible-watermark" : "0.2.0",
"InvokeAI" : "6.11.0rc1",
"jax" : "0.7.1",
"jaxlib" : "0.7.1",
"Jinja2" : "3.1.6",
"kiwisolver" : "1.4.9",
"MarkupSafe" : "2.1.5",
"matplotlib" : "3.10.8",
"mediapipe" : "0.10.14",
"ml_dtypes" : "0.5.4",
"mpmath" : "1.3.0",
"networkx" : "3.6.1",
"numpy" : "1.26.4",
"nvidia-cublas-cu12" : "12.8.3.14",
"nvidia-cuda-cupti-cu12" : "12.8.57",
"nvidia-cuda-nvrtc-cu12" : "12.8.61",
"nvidia-cuda-runtime-cu12": "12.8.57",
"nvidia-cudnn-cu12" : "9.7.1.26",
"nvidia-cufft-cu12" : "11.3.3.41",
"nvidia-cufile-cu12" : "1.13.0.11",
"nvidia-curand-cu12" : "10.3.9.55",
"nvidia-cusolver-cu12" : "11.7.2.55",
"nvidia-cusparse-cu12" : "12.5.7.53",
"nvidia-cusparselt-cu12" : "0.6.3",
"nvidia-nccl-cu12" : "2.26.2",
"nvidia-nvjitlink-cu12" : "12.8.61",
"nvidia-nvtx-cu12" : "12.8.55",
"onnx" : "1.16.1",
"onnxruntime" : "1.19.2",
"opencv-contrib-python" : "4.11.0.86",
"opencv-python" : "4.11.0.86",
"opt_einsum" : "3.4.0",
"packaging" : "24.1",
"picklescan" : "1.0.0",
"pillow" : "12.0.0",
"prompt_toolkit" : "3.0.52",
"protobuf" : "4.25.8",
"psutil" : "7.2.1",
"pycparser" : "3.0",
"pydantic" : "2.12.5",
"pydantic-settings" : "2.12.0",
"pydantic_core" : "2.41.5",
"pyparsing" : "3.3.2",
"PyPatchMatch" : "1.0.2",
"python-dateutil" : "2.9.0.post0",
"python-dotenv" : "1.2.1",
"python-engineio" : "4.13.0",
"python-multipart" : "0.0.22",
"python-socketio" : "5.16.0",
"PyWavelets" : "1.9.0",
"PyYAML" : "6.0.3",
"regex" : "2026.1.15",
"requests" : "2.28.1",
"safetensors" : "0.7.0",
"scipy" : "1.17.0",
"semver" : "3.0.4",
"sentencepiece" : "0.2.0",
"setuptools" : "70.2.0",
"simple-websocket" : "1.1.0",
"six" : "1.17.0",
"sniffio" : "1.3.1",
"sounddevice" : "0.5.5",
"spandrel" : "0.4.1",
"starlette" : "0.48.0",
"sympy" : "1.14.0",
"tokenizers" : "0.22.2",
"torch" : "2.7.1+cu128",
"torchsde" : "0.2.6",
"torchvision" : "0.22.1+cu128",
"tqdm" : "4.66.5",
"trampoline" : "0.1.2",
"transformers" : "4.57.6",
"triton" : "3.3.1",
"typing-inspection" : "0.4.2",
"typing_extensions" : "4.15.0",
"urllib3" : "1.26.13",
"uvicorn" : "0.40.0",
"uvloop" : "0.22.1",
"watchfiles" : "1.1.1",
"wcwidth" : "0.5.0",
"websockets" : "16.0",
"wrapt" : "2.0.1",
"wsproto" : "1.3.2",
"zipp" : "3.19.2"
},
"config": {
"schema_version": "4.0.2",
"legacy_models_yaml_path": null,
"host": "127.0.0.1",
"port": 9090,
"allow_origins": [],
"allow_credentials": true,
"allow_methods": [""],
"allow_headers": [""],
"ssl_certfile": null,
"ssl_keyfile": null,
"log_tokenization": false,
"patchmatch": true,
"models_dir": "models",
"convert_cache_dir": "models/.convert_cache",
"download_cache_dir": "models/.download_cache",
"legacy_conf_dir": "configs",
"db_dir": "databases",
"outputs_dir": "outputs",
"custom_nodes_dir": "nodes",
"style_presets_dir": "style_presets",
"workflow_thumbnails_dir": "workflow_thumbnails",
"log_handlers": ["console"],
"log_format": "color",
"log_level": "info",
"log_sql": false,
"log_level_network": "warning",
"use_memory_db": false,
"dev_reload": false,
"profile_graphs": false,
"profile_prefix": null,
"profiles_dir": "profiles",
"max_cache_ram_gb": null,
"max_cache_vram_gb": null,
"log_memory_usage": false,
"model_cache_keep_alive_min": 0,
"device_working_mem_gb": 3,
"enable_partial_loading": true,
"keep_ram_copy_of_weights": true,
"ram": null,
"vram": null,
"lazy_offload": true,
"pytorch_cuda_alloc_conf": null,
"device": "auto",
"precision": "auto",
"sequential_guidance": false,
"attention_type": "auto",
"attention_slice_size": "auto",
"force_tiled_decode": false,
"pil_compress_level": 1,
"max_queue_size": 10000,
"clear_queue_on_startup": false,
"allow_nodes": null,
"deny_nodes": null,
"node_cache_size": 512,
"hashing_algorithm": "blake3_single",
"remote_api_tokens": null,
"scan_models_on_startup": false,
"unsafe_disable_picklescan": false,
"allow_unknown_models": true
},
"set_config_fields": ["legacy_models_yaml_path", "enable_partial_loading"]
}
What happened
Used F2K 4B FP8. Not sure if this is an issue with Invoke, a limitation of F2K, or user error.
When attempting to inpaint a part of an image while a reference image is active, an error occurs and the task is cancelled.
In this case, it was an image of a dog and a cat in front of a green background. There were 2 reference images, one of the dog and one of the cat, and the inpaint mask covered both of them.
What you expected to happen
When performing img2img with F2K using reference images and partial denoise (30% in this case), the generation is completed as expected. I expected the same task, just with the addition of an inpaint mask, to also complete successfully.
How to reproduce the problem
- Apply F2K generation settings (I used the defaults).
- Add one or more reference images
- Invoke something using the reference images
- Draw an inpaint mask over any part of the image
- Attempt to invoke with the inpaint mask and reference images active (denoising strength doesn't matter).
Additional context
No response
Discord username
No response