forked from benrugg/AI-Render
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathoperators.py
599 lines (445 loc) · 20.4 KB
/
operators.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
import bpy
import requests
import functools
import random
import time
from . import (
config,
task_queue,
utils,
)
valid_dimensions_tuple_list = utils.generate_valid_dimensions_tuple_list()
def enable_air(scene):
    """Turn AI Render on for the given scene, preparing all required state."""
    # The task queue must be (re)registered here as well as post-load,
    # because app timers are stopped whenever a new blender file is loaded
    task_queue.register()

    # Make sure the AI Render workspace (compositor + image viewer) exists,
    # so the newly rendered image has somewhere to appear
    ensure_air_workspace()

    # Build the AI Render compositor node group if it isn't there yet
    ensure_compositor_node_group(scene)

    # Wipe any stale error that may have been saved with this file
    # (e.g. an error persisted from a render in a previous session)
    clear_error(scene)
def mute_compositor_node_group(scene):
    """Mute the AIR group node so the raw render passes straight through."""
    air_group = scene.node_tree.nodes.get('AIR')
    air_group.mute = True
def unmute_compositor_node_group(scene):
    """Unmute the AIR group node so the AI result is composited in."""
    air_group = scene.node_tree.nodes.get('AIR')
    air_group.mute = False
def update_compositor_node_with_image(scene, img):
    """Point the AIR group's internal image node at the given image datablock."""
    air_group = scene.node_tree.nodes.get('AIR')
    air_group.node_tree.nodes.get('AIR_image_node').image = img
def get_or_create_composite_node(compositor_nodes):
    """Get the existing Composite node, or create one"""
    # Fast path: the node with Blender's default name
    named = compositor_nodes.get('Composite')
    if named:
        return named
    # Otherwise accept any composite-typed node, whatever it's called
    matches = [node for node in compositor_nodes if node.type == 'COMPOSITE']
    if matches:
        return matches[0]
    # None exists yet, so make a fresh one
    return compositor_nodes.new('CompositorNodeComposite')
def get_or_create_render_layers_node(compositor_nodes):
    """Get the existing Render Layers node, or create one"""
    # Fast path: the node with Blender's default name
    named = compositor_nodes.get('Render Layers')
    if named:
        return named
    # Otherwise accept any render-layers-typed node, whatever it's called
    matches = [node for node in compositor_nodes if node.type == 'R_LAYERS']
    if matches:
        return matches[0]
    # None exists yet, so make a fresh one
    return compositor_nodes.new('CompositorNodeRLayers')
def ensure_compositor_node_group(scene):
    """Ensure that the compositor node group is created.

    Builds an 'AIR' node group (image node + mix node) and splices it
    between whatever was feeding the Composite node and the Composite
    node itself. Idempotent: returns immediately if the 'AIR' group
    already exists in this scene's compositor.
    """
    scene.use_nodes = True
    compositor_nodes = scene.node_tree.nodes
    # if our image node already exists, just quit
    if 'AIR' in compositor_nodes:
        return {'FINISHED'}
    # otherwise, create a new node group
    node_tree = bpy.data.node_groups.new('AIR_node_group_v1', 'CompositorNodeTree')
    node_group = compositor_nodes.new('CompositorNodeGroup')
    node_group.node_tree = node_tree
    node_group.location = (400, 500)
    node_group.name = 'AIR'
    node_group.label = 'AI Render'
    # input/output sockets for the group, placed left/right of the mix node
    group_input = node_tree.nodes.new(type='NodeGroupInput')
    group_input.location = (0, 30)
    group_output = node_tree.nodes.new(type='NodeGroupOutput')
    group_output.location = (620, 0)
    # create a new image node and mix rgb node in the group
    image_node = node_tree.nodes.new(type='CompositorNodeImage')
    image_node.name = 'AIR_image_node'
    image_node.location = (60, -100)
    image_node.label = 'AI Render Result'
    mix_node = node_tree.nodes.new(type='CompositorNodeMixRGB')
    mix_node.name = 'AIR_mix_node'
    mix_node.location = (350, 75)
    # get a reference to the new link functions, for convenience
    create_link_in_group = node_tree.links.new
    create_link_in_compositor = scene.node_tree.links.new
    # create all the links within the group (group input node and image node to
    # the mix node, and mix node to the group output node)
    create_link_in_group(group_input.outputs[0], mix_node.inputs[1])
    create_link_in_group(image_node.outputs.get('Image'), mix_node.inputs[2])
    create_link_in_group(mix_node.outputs.get('Image'), group_output.inputs[0])
    # get the socket that's currently linked to the compositor, or as a
    # fallback, get the rendered image output
    composite_node = get_or_create_composite_node(compositor_nodes)
    render_layers_node = get_or_create_render_layers_node(compositor_nodes)
    if composite_node.inputs.get('Image').is_linked:
        original_socket = composite_node.inputs.get('Image').links[0].from_socket
    else:
        original_socket = render_layers_node.outputs.get('Image')
    # link the original socket to the input of the group
    create_link_in_compositor(original_socket, node_group.inputs[0])
    # link the output of the group to the compositor node
    create_link_in_compositor(node_group.outputs[0], composite_node.inputs.get('Image'))
    return {'FINISHED'}
def ensure_air_workspace():
    """Ensure we have a compositor window and an image viewer"""
    # Nothing to do if the workspace is already present in this file
    if config.workspace_id in bpy.data.workspaces:
        return

    # Append the workspace from our bundled blend file, then restore whatever
    # workspace the user had active (append_activate switches to the new one)
    original_workspace = utils.get_current_workspace()
    bpy.ops.workspace.append_activate(
        idname=config.workspace_id,
        filepath=utils.get_workspace_blend_file_filepath()
    )
    utils.activate_workspace(workspace=original_workspace)
def activate_air_workspace(scene):
    """Activate the special compositor workspace, and make sure it's viewing the render result"""
    try:
        utils.activate_workspace(workspace_id=config.workspace_id)
        utils.view_render_result_in_air_image_editor()
    except Exception:
        # narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit. Any failure here likely means the
        # user deleted the workspace, so disable AI Render and explain.
        scene.air_props.is_enabled = False
        handle_error("Couldn't find the AI Render workspace. Please re-enable AI Render, or deactivate the AI Render add-on.")
def set_image_dimensions(context, width, height):
    """Apply the given render resolution (at 100%) and clear any prior error."""
    render_settings = context.scene.render
    render_settings.resolution_x = width
    render_settings.resolution_y = height
    render_settings.resolution_percentage = 100

    clear_error(context.scene)
def handle_error(msg, error_key=''):
    """Show an error popup, and set the error message to be displayed in the ui"""
    print("AI Render Error: ", msg)
    # The popup must run on the main thread, so queue it rather than call it
    show_popup = functools.partial(
        bpy.ops.ai_render.show_error_popup,
        'INVOKE_DEFAULT',
        error_message=msg,
        error_key=error_key,
    )
    task_queue.add(show_popup)
    # Always False, so callers can `return handle_error(...)` to signal failure
    return False
def clear_error(scene):
    """Clear the error message in the ui"""
    props = scene.air_props
    props.error_message = ''
    props.error_key = ''
def clear_error_handler(self, context):
    """Callback-style wrapper (self, context signature) that clears any displayed error."""
    clear_error(context.scene)
def generate_new_random_seed(scene):
    """Pick a fresh random seed, but only if the scene opts into random seeds."""
    props = scene.air_props
    if not props.use_random_seed:
        return
    # Range matches the original: 1,000,000,000 .. 2^31 - 1 inclusive
    props.seed = random.randint(1000000000, 2147483647)
def save_render_to_file(scene, timestamp):
    """Save the current 'Render Result' image to a temp PNG file.

    Returns the temp file path on success, or False (via handle_error)
    on failure. Temporarily forces PNG/RGBA/8-bit output settings and
    restores the user's original settings afterward — even on failure
    (the original code left them clobbered if save_render raised).
    """
    try:
        temp_file = utils.create_temp_file(f"ai-render-{timestamp}-1-before-")
    except Exception:
        return handle_error("Couldn't create temp file for image")

    image_settings = scene.render.image_settings
    orig_render_file_format = image_settings.file_format
    orig_render_color_mode = image_settings.color_mode
    orig_render_color_depth = image_settings.color_depth
    try:
        # The API needs a PNG, regardless of the scene's own output settings
        image_settings.file_format = 'PNG'
        image_settings.color_mode = 'RGBA'
        image_settings.color_depth = '8'
        bpy.data.images['Render Result'].save_render(temp_file)
    except Exception:
        # narrowed from a bare `except:` (which would trap KeyboardInterrupt)
        return handle_error("Couldn't save rendered image")
    finally:
        # always restore the user's original output settings
        image_settings.file_format = orig_render_file_format
        image_settings.color_mode = orig_render_color_mode
        image_settings.color_depth = orig_render_color_depth
    return temp_file
def save_before_image(scene, timestamp):
    """Autosave the raw render (the 'before' image) to the autosave directory.

    Uses the scene's own output file format for the extension. Returns
    None on success, or False (via handle_error) on failure.
    """
    ext = utils.get_extension_from_file_format(scene.render.image_settings.file_format)
    if ext:
        ext = f".{ext}"
    filename = f"ai-render-{timestamp}-1-before{ext}"
    full_path_and_filename = utils.join_path(scene.air_props.autosave_image_path, filename)
    try:
        bpy.data.images['Render Result'].save_render(bpy.path.abspath(full_path_and_filename))
    except Exception:
        # narrowed from a bare `except:` (which would trap KeyboardInterrupt)
        return handle_error(f"Couldn't save 'before' image to {bpy.path.abspath(full_path_and_filename)}")
def save_after_image(scene, timestamp, img_file):
    """Autosave the AI-generated ('after') image to the autosave directory.

    Copies img_file to the configured path. Returns the saved file's path
    on success, or False (via handle_error) on failure.
    """
    filename = f"ai-render-{timestamp}-2-after.png"
    full_path_and_filename = utils.join_path(scene.air_props.autosave_image_path, filename)
    try:
        utils.copy_file(img_file, full_path_and_filename)
        return full_path_and_filename
    except Exception:
        # narrowed from a bare `except:` (which would trap KeyboardInterrupt)
        return handle_error(f"Couldn't save 'after' image to {bpy.path.abspath(full_path_and_filename)}")
def do_pre_render_setup(scene, do_mute_node_group=True):
    """Prepare the scene immediately before a render or API request."""
    # Lock the user interface when rendering, so that we can change
    # compositor nodes in the render_pre handler without causing a crash!
    # See: https://docs.blender.org/api/current/bpy.app.handlers.html#note-on-altering-data
    scene.render.use_lock_interface = True

    # Start from a clean slate: no stale errors, and the AI Render
    # node group present in the compositor
    clear_error(scene)
    ensure_compositor_node_group(scene)

    # Mute the group to see the raw render, or unmute to composite
    # the previous AI image instead
    if do_mute_node_group:
        mute_compositor_node_group(scene)
    else:
        unmute_compositor_node_group(scene)
def do_pre_api_setup(scene):
    """Switch to the AI Render workspace before sending the API request."""
    # switch the workspace to our ai render compositor, so the new rendered image will actually appear
    activate_air_workspace(scene)
def validate_params(scene):
    """Validate everything needed to make an API request.

    Returns True when all params are valid; otherwise reports the first
    problem via handle_error (which returns False), tagging it with an
    error_key so the UI can highlight the offending field.
    """
    # (removed an unused local: `props = scene.air_props` was never read)
    if utils.get_api_key().strip() == "":
        return handle_error("You must enter an API Key to render with Stable Diffusion", "api_key")
    if not utils.are_dimensions_valid(scene):
        return handle_error("Please set width and height to valid values", "dimensions")
    if utils.are_dimensions_too_large(scene):
        return handle_error("Image dimensions are too large. Please decrease width and/or height", "dimensions")
    if get_full_prompt(scene) == "":
        return handle_error("Please enter a prompt for Stable Diffusion", "prompt")
    return True
def get_full_prompt(scene):
    """Build the final prompt string, combining user text with any preset style."""
    props = scene.air_props
    prompt = props.prompt_text.strip()

    # The placeholder text doesn't count as a real prompt
    if prompt == config.default_prompt_text:
        prompt = ""

    if not props.use_preset:
        return prompt

    # Substitute the preset style when there's no prompt, else append it
    return props.preset_style if prompt == "" else f"{prompt}, {props.preset_style}"
def send_to_api(scene):
    """Post the rendered image to the API and process the resulting image.

    Returns True on success. On any failure, reports via handle_error
    (which returns False). On success the AI image is loaded into the
    compositor and the AIR node group is unmuted so it becomes visible.
    """
    props = scene.air_props

    # validate the parameters we will send
    if not validate_params(scene):
        return False

    # generate a new seed, if we want a random one
    generate_new_random_seed(scene)

    # prepare a timestamp for the filenames
    timestamp = int(time.time())

    # prepare data for the API request
    headers = {
        "User-Agent": "Blender/" + bpy.app.version_string,
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate, br",
        "Dream-Studio-Api-Key": utils.get_api_key(),
    }
    params = {
        "prompt": get_full_prompt(scene),
        "width": utils.get_output_width(scene),
        "height": utils.get_output_height(scene),
        "image_similarity": props.image_similarity,
        "seed": props.seed,
        "cfg_scale": props.cfg_scale,
        "steps": props.steps,
        "sampler": props.sampler,
    }

    # save the rendered image so it can be uploaded
    temp_input_file = save_render_to_file(scene, timestamp)
    if not temp_input_file:
        return False

    # autosave the before image, if we want that
    if props.do_autosave_before_images and props.autosave_image_path:
        save_before_image(scene, timestamp)

    # send the API request (the `with` block closes the uploaded file handle,
    # which the previous version leaked)
    try:
        with open(temp_input_file, 'rb') as img_file:
            response = requests.post(config.API_URL, params=params, headers=headers, files={"file": img_file}, timeout=config.request_timeout)
    except requests.exceptions.ReadTimeout:
        return handle_error(f"The server timed out. Try again in a moment, or get help. [Get help with timeouts]({config.HELP_WITH_TIMEOUTS_URL})")

    # NOTE: For debugging: print(response.request.body) / print(response.content)

    # handle a successful response
    if response.status_code == 200:
        # save the image
        try:
            output_file = utils.create_temp_file(f"ai-render-{timestamp}-2-after-")
            with open(output_file, 'wb') as file:
                for chunk in response:
                    file.write(chunk)

            # autosave the after image, if we want that
            if props.do_autosave_after_images and props.autosave_image_path:
                new_output_file = save_after_image(scene, timestamp, output_file)
                if new_output_file:
                    output_file = new_output_file
        except Exception:
            return handle_error("Couldn't create a temp file to save image")

        # load the image into our scene
        try:
            img = bpy.data.images.load(output_file, check_existing=True)
        except Exception:
            return handle_error("Couldn't load the image from Stable Diffusion")

        # load the image into the compositor, and unmute the node group
        # so the AI result becomes visible
        update_compositor_node_with_image(scene, img)
        unmute_compositor_node_group(scene)

    # handle 403/404 (endpoint missing or forbidden)
    elif response.status_code in [403, 404]:
        return handle_error("It looks like the web server this add-on relies on is missing. It's possible this is temporary, and you can try again later.")

    # handle 500
    elif response.status_code == 500:
        return handle_error(f"An unknown error occurred in the DreamStudio API. Full server response: {str(response.content)}")

    # handle all other errors
    else:
        import json
        error_key = ''

        try:
            response_obj = response.json()
            if response_obj.get('Message', '') in ['Forbidden', None]:
                error_message = "It looks like the web server this add-on relies on is missing. It's possible this is temporary, and you can try again later."
            else:
                error_message = "(Server Error) " + response_obj.get('error', f"An unknown error occurred in the DreamStudio API. Full server response: {json.dumps(response_obj)}")
            error_key = response_obj.get('error_key', '')
        except Exception:
            # body wasn't valid json; fall back to the raw content
            error_message = f"An unknown error occurred in the DreamStudio API. Full server response: {str(response.content)}"

        return handle_error(error_message, error_key)

    return True
class AIR_OT_enable(bpy.types.Operator):
    "Enable AI Render in this scene"
    bl_idname = "ai_render.enable"
    bl_label = "Enable AI Render"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # do the full setup, then flag the scene as enabled
        scene = context.scene
        enable_air(scene)
        scene.air_props.is_enabled = True
        return {'FINISHED'}
class AIR_OT_set_valid_render_dimensions(bpy.types.Operator):
    "Set render width and height to 512 x 512"
    bl_idname = "ai_render.set_valid_render_dimensions"
    bl_label = "Set Image Size to 512x512"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # 512x512 is the known-good square size for the API
        size = 512
        set_image_dimensions(context, size, size)
        return {'FINISHED'}
class AIR_OT_show_other_dimension_options(bpy.types.Operator):
    "Other options for image size"
    bl_idname = "ai_render.show_other_dimension_options"
    bl_label = "Image Size Options"
    bl_options = {'REGISTER', 'UNDO'}

    # fixed width of the popup dialog, in pixels
    panel_width = 250

    width: bpy.props.EnumProperty(
        name="Image Width",
        default="512",
        items=valid_dimensions_tuple_list,
        description="Image Width"
    )
    height: bpy.props.EnumProperty(
        name="Image Height",
        default="512",
        items=valid_dimensions_tuple_list,
        description="Image Height"
    )

    def draw(self, context):
        layout = self.layout
        utils.label_multiline(layout, text="Choose dimensions that Stable Diffusion can work with. (Anything larger than 512x512 risks taking too long and timing out without results)", width=self.panel_width)
        layout.separator()

        # one labeled dropdown row per dimension
        for label_text, prop_name in (("Width:", "width"), ("Height:", "height")):
            row = layout.row()
            row.column().label(text=label_text)
            row.column().prop(self, prop_name, text="")

        layout.separator()

    def invoke(self, context, event):
        return context.window_manager.invoke_props_dialog(self, width=self.panel_width)

    def execute(self, context):
        # enum values are strings, so convert to ints before applying
        set_image_dimensions(context, int(self.width), int(self.height))
        return {'FINISHED'}
class AIR_OT_generate_new_image_from_render(bpy.types.Operator):
    "Generate a new Stable Diffusion image - without re-rendering - from the last rendered image"
    bl_idname = "ai_render.generate_new_image_from_render"
    bl_label = "New Image From Last Render"

    def execute(self, context):
        scene = context.scene
        do_pre_render_setup(scene)
        do_pre_api_setup(scene)

        # post to the api (on a different thread, outside the operator)
        task_queue.add(functools.partial(send_to_api, scene))

        return {'FINISHED'}
class AIR_OT_generate_new_image_from_current(bpy.types.Operator):
    "Generate a new Stable Diffusion image - without re-rendering - using the latest Stable Diffusion image as the starting point"
    bl_idname = "ai_render.generate_new_image_from_current"
    bl_label = "New Image From Last AI Image"

    def execute(self, context):
        scene = context.scene
        # keep the node group unmuted so the previous AI image is the input
        do_pre_render_setup(scene, do_mute_node_group=False)
        do_pre_api_setup(scene)

        # post to the api (on a different thread, outside the operator)
        task_queue.add(functools.partial(send_to_api, scene))

        return {'FINISHED'}
class AIR_OT_setup_instructions_popup(bpy.types.Operator):
    "Show the setup instructions in a popup dialog"
    bl_idname = "ai_render.show_setup_instructions_popup"
    bl_label = "Stable Diffusion Setup"

    # fixed width of the popup dialog, in pixels
    width = 350

    message: bpy.props.StringProperty(
        name="message",
        description="Message to display"
    )

    def draw(self, context):
        layout = self.layout
        utils.label_multiline(layout, text=self.message, icon="HELP", width=self.width-3, alignment="CENTER")
        signup_row = layout.row()
        signup_button = signup_row.operator("wm.url_open", text="Sign Up For DreamStudio (free)", icon="URL")
        signup_button.url = config.DREAM_STUDIO_URL

    def invoke(self, context, event):
        self.message = "This add-on uses a service called DreamStudio. You will need to create a DreamStudio account, and get your own API KEY from them. You will get free credits, which will be used when you render. After using your free credits, you will need to sign up for a membership. DreamStudio is unaffiliated with this Blender add-on. It's just a great and easy to use option!"
        return context.window_manager.invoke_props_dialog(self, width=self.width)

    def execute(self, context):
        # nothing to do on confirm; the dialog is informational only
        return {'FINISHED'}
class AIR_OT_show_error_popup(bpy.types.Operator):
    "Show an error message in a popup dialog"
    bl_idname = "ai_render.show_error_popup"
    bl_label = "AI Render Error"

    # fixed width of the popup dialog, in pixels
    width = 350

    error_key: bpy.props.StringProperty(
        name="error_key",
        default="",
        description="Error key code related to specific api param that had an error"
    )
    error_message: bpy.props.StringProperty(
        name="error_message",
        description="Error Message to display"
    )

    def _report_to_status_bar(self):
        # surface the error in Blender's status bar as well as the popup
        self.report({'ERROR'}, self.error_message)

    def draw(self, context):
        utils.label_multiline(self.layout, text=self.error_message, icon="ERROR", width=self.width)

    def invoke(self, context, event):
        # store the error key and message in the main AIR props
        props = context.scene.air_props
        props.error_key = self.error_key
        props.error_message = self.error_message

        # show a popup
        return context.window_manager.invoke_props_dialog(self, width=self.width)

    def cancel(self, context):
        self._report_to_status_bar()

    def execute(self, context):
        self._report_to_status_bar()
        return {'FINISHED'}
# All operator classes defined by this module, in registration order
classes = [
    AIR_OT_enable,
    AIR_OT_set_valid_render_dimensions,
    AIR_OT_show_other_dimension_options,
    AIR_OT_generate_new_image_from_render,
    AIR_OT_generate_new_image_from_current,
    AIR_OT_setup_instructions_popup,
    AIR_OT_show_error_popup,
]
def register():
    """Register every operator class this module defines."""
    for operator_cls in classes:
        bpy.utils.register_class(operator_cls)
def unregister():
    """Unregister every operator class this module defines."""
    for operator_cls in classes:
        bpy.utils.unregister_class(operator_cls)