From eb9c982916537a1cddfd521a570982f05dc27e4b Mon Sep 17 00:00:00 2001
From: rlsn
Date: Fri, 22 Mar 2024 14:41:52 +0900
Subject: [PATCH] refactored parameter

---
 README.md   | 2 +-
 config.json | 2 +-
 run.py      | 8 ++++----
 tripper.py  | 2 +-
 utils.py    | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index 6b6b8bc..86d8924 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ A pipeline that generate consecutive sequence of images with SD1.5. It's quite a
 # Usage
 Edit the `config.json` to configure the settings:
 - `model_path`: path to your SD model safetensors
-- `generate_video`: set false to generate a batch of images to choose from as an initial image, then set true to switch to video mode.
+- `generate_animation`: set false to generate a batch of images to choose from as an initial image, then set true to switch to animation mode.
 - `nframes`: total number of frames to generate
 - `scheduler`: choose one from "euler", "euler a", "DDIM", "DDPM", "DPM++ 2M SDE Karras", "DPM++ 2M Karras"
 - `num_inference_steps`: per image
diff --git a/config.json b/config.json
index 3a97449..ad1b229 100644
--- a/config.json
+++ b/config.json
@@ -1,9 +1,9 @@
 {
     "model_path": "your_model.safetensors",
     "scheduler": "DPM++ 2M SDE Karras",
-    "generate_video": false,
     "init_image" : "preview/your_init_image.jpg",
     "prompt": "masterpiece, best quality, realistic, detailed background, the forgotten city made of white marble, grand, epic, fantasy, mystical, sunbeam, soft lighting, volumetric lighting, dramatic",
+    "generate_animation": false,
     "negative_prompt": "(worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), bad anatomy,ng_deepnegative_v1_75t,easynegative, badhandv4, text, watermark,",
     "strength":0.65,
     "num_img":6,
diff --git a/run.py b/run.py
index c9c1849..bf421b7 100644
--- a/run.py
+++ b/run.py
@@ -9,7 +9,7 @@
 import diffusers
 import argparse
 from attrdict import AttrDict
-from utils import const_schedule, zoom, export_as_gif, timestr, interpolate_video
+from utils import const_schedule, zoom, export_as_gif, timestr, interpolate_animation
 import PIL
 
 if __name__=="__main__":
@@ -22,14 +22,14 @@
     tripper = Tripper(config.model_path)
     tripper.set_scheduler(schedulers[config.scheduler])
 
-    if config.generate_video:
+    if config.generate_animation:
         config.init_image = PIL.Image.open(config.init_image)
         # strength schedule
         config.strength_schedule = const_schedule(config.strength,config.nframes)
         config.transform_fn = lambda img,s: zoom(img, config.zoom)
         config.nsteps=int(config.nframes//config.diffusion_cadence)
-        imgs = tripper.generate_video(**config)
-        imgs = interpolate_video(imgs, config.diffusion_cadence)
+        imgs = tripper.generate_animation(**config)
+        imgs = interpolate_animation(imgs, config.diffusion_cadence)
         export_as_gif(f"{config.out_dir}/{timestr()}.gif", imgs, frames_per_second=config.fps)
     else:
         tripper.txt2img(**config)
\ No newline at end of file
diff --git a/tripper.py b/tripper.py
index 49bb667..3abf2ea 100644
--- a/tripper.py
+++ b/tripper.py
@@ -100,7 +100,7 @@ def img2img(self, image, prompt, negative_prompt, lora_dict, strength=0.5,
         self.unload_lora(lora_dict)
         return images
 
-    def generate_video(self, init_image, prompt, negative_prompt,
+    def generate_animation(self, init_image, prompt, negative_prompt,
                     lora_dict, nsteps,
                     strength_schedule, transform_fn,
                     guidance_scale=7,
diff --git a/utils.py b/utils.py
index 2cebc58..21707fb 100644
--- a/utils.py
+++ b/utils.py
@@ -188,7 +188,7 @@ def interpolation(img1, img2, num_frame=1):
         imgs.append(Image.fromarray(im.astype(np.uint8)))
     return imgs
 
-def interpolate_video(imgs, cadence=2):
+def interpolate_animation(imgs, cadence=2):
     if cadence<=1:
         return imgs
     else: