cleanup for docs M-Z
DICT supported better in converters
min stride for array/grids
☣️💣 notices
Amorano committed May 29, 2024
1 parent 85fc906 commit 85e7480
Showing 9 changed files with 82 additions and 54 deletions.
19 changes: 10 additions & 9 deletions core/audio.py
@@ -31,6 +31,8 @@ class LoadWaveNode(JOVBaseNode):
RETURN_TYPES = ("WAVE",)
RETURN_NAMES = (Lexicon.WAVE,)
DESCRIPTION = """
☣️💣☣️💣☣️💣☣️💣 THIS NODE IS A WORK IN PROGRESS ☣️💣☣️💣☣️💣☣️💣
The Load Wave node imports audio files, converting them to waveforms. Specify the file path to load the audio data.
"""

@@ -95,11 +97,11 @@ def INPUT_TYPES(cls) -> dict:

def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
wave = parse_param(kw, Lexicon.WAVE, EnumConvertType.ANY, None)
bars = parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, 100, 1, 8192)
thick = parse_param(kw, Lexicon.THICK, EnumConvertType.FLOAT, 0.72, 0, 1)
wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, [(MIN_IMAGE_SIZE, MIN_IMAGE_SIZE)], MIN_IMAGE_SIZE)
rgb_a = parse_param(kw, Lexicon.RGBA_A, EnumConvertType.VEC4INT, [(128, 128, 0, 255)], 0, 255)
matte = parse_param(kw, Lexicon.RGBA_B, EnumConvertType.VEC4INT, [(0, 128, 128, 255)], 0, 255)
bars = parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, 50, 1, 8192)
thick = parse_param(kw, Lexicon.THICK, EnumConvertType.FLOAT, 0.75, 0, 1)
wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (MIN_IMAGE_SIZE, MIN_IMAGE_SIZE), MIN_IMAGE_SIZE)
rgb_a = parse_param(kw, Lexicon.RGBA_A, EnumConvertType.VEC4INT, (196, 0, 196), 0, 255)
matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (42, 12, 42), 0, 255)
params = list(zip_longest_fill(wave, bars, wihi, thick, rgb_a, matte))
images = []
pbar = ProgressBar(len(params))
@@ -108,8 +110,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
if wave is None:
img = channel_solid(width, height, matte, EnumImageType.BGRA)
else:
img = graph_sausage(wave, bars, width, height, thickness=thick, color_line=rgb_a, color_back=matte)
img = cv2tensor_full(img)
images.append(img)
img = graph_sausage(wave[0], bars, width, height, thickness=thick, color_line=rgb_a, color_back=matte)
images.append(cv2tensor_full(img))
pbar.update_absolute(idx)
return list(zip(*images))
return [torch.stack(i, dim=0).squeeze(1) for i in list(zip(*images))]
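The replacement return line uses the zip/stack idiom seen across the suite; a small sketch with hypothetical shapes shows what it produces:

    # Hedged sketch: `images` holds one tuple per frame (e.g. RGBA, RGB, mask
    # tensors from cv2tensor_full -- the exact slots are an assumption here).
    import torch

    images = [(torch.rand(1, 64, 64, 4), torch.rand(1, 64, 64, 3), torch.rand(1, 64, 64, 1))
              for _ in range(5)]

    # zip(*images) regroups the list by output slot; torch.stack batches each
    # slot and squeeze(1) drops the singleton frame dimension -> (5, 64, 64, C).
    outputs = [torch.stack(slot, dim=0).squeeze(1) for slot in zip(*images)]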
49 changes: 27 additions & 22 deletions core/compose.py
@@ -292,7 +292,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
pbar = ProgressBar(len(params))
for idx, (r, g, b, a, mode, wihi, sample, matte) in enumerate(params):
w, h = wihi
ret = [channel_solid(w, h, chan=EnumImageType.GRAYSCALE) if x is None else image_grayscale(tensor2cv(x)) for x in (r, g, b, a)]
ret = [channel_solid(w, h, chan=EnumImageType.GRAYSCALE) if x is None else image_grayscale(tensor2cv(x)) for x in (b, g, r, a)]
h, w = ret[0].shape[:2]
ret = [cv2.resize(r, (w, h)) for r in ret]
img = channel_merge(ret)
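The reordering to (b, g, r, a) presumably lines the planes up with OpenCV's BGR(A) channel layout before channel_merge; a tiny hedged check of that convention:

    # Hedged sketch: OpenCV stores channels as BGR(A), so per-channel planes
    # are merged in blue-green-red-alpha order.
    import numpy as np
    import cv2

    b = g = r = a = np.zeros((64, 64), dtype=np.uint8)
    bgra = cv2.merge([b, g, r, a])   # shape (64, 64, 4), BGRA layout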
@@ -403,31 +403,36 @@ def INPUT_TYPES(cls) -> dict:
return Lexicon._parse(d, cls)

def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
pA = []
pA.extend([r for r in parse_dynamic(kw, Lexicon.PIXEL, EnumConvertType.IMAGE, None)])
ret = parse_dynamic(kw, Lexicon.PIXEL, EnumConvertType.IMAGE, None)
images = []
for i in ret:
images.extend(i)
if len(images) == 0:
logger.warning("no images to stack")
return
axis = parse_param(kw, Lexicon.AXIS, EnumConvertType.STRING, EnumOrientation.GRID.name)
stride = parse_param(kw, Lexicon.STEP, EnumConvertType.INT, 1)
mode = parse_param(kw, Lexicon.MODE, EnumConvertType.STRING, EnumScaleMode.NONE.name)
wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, [(MIN_IMAGE_SIZE, MIN_IMAGE_SIZE)], MIN_IMAGE_SIZE)
sample = parse_param(kw, Lexicon.SAMPLE, EnumConvertType.STRING, EnumInterpolation.LANCZOS4.name)
matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, [(0, 0, 0, 255)], 0, 255)
axis = parse_param(kw, Lexicon.AXIS, EnumConvertType.STRING, EnumOrientation.GRID.name)[0]
stride = parse_param(kw, Lexicon.STEP, EnumConvertType.INT, 1, 1)[0]
mode = parse_param(kw, Lexicon.MODE, EnumConvertType.STRING, EnumScaleMode.NONE.name)[0]
wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (MIN_IMAGE_SIZE, MIN_IMAGE_SIZE), MIN_IMAGE_SIZE)[0]
sample = parse_param(kw, Lexicon.SAMPLE, EnumConvertType.STRING, EnumInterpolation.LANCZOS4.name)[0]
matte = parse_param(kw, Lexicon.MATTE, EnumConvertType.VEC4INT, (0, 0, 0, 255), 0, 255)[0]
images = [tensor2cv(img) for img in images]
params = list(zip_longest_fill(axis, stride, mode, wihi, sample, matte))
images = []
pbar = ProgressBar(len(params))
for idx, (mode, wihi, sample, matte) in enumerate(params):
axis = EnumOrientation[axis]
img = image_stack(pA, axis, stride, matte)
w, h = wihi
mode = EnumScaleMode[mode]
if mode != EnumScaleMode.NONE:
sample = EnumInterpolation[sample]
img = image_scalefit(img, w, h, mode, sample)
images.append(cv2tensor_full(img, matte))
pbar.update_absolute(idx)
#params = list(zip_longest_fill(axis, stride, mode, wihi, sample, matte))
#images = []
#pbar = ProgressBar(len(params))
#print(params)
#for idx, (axis, stride, mode, wihi, sample, matte) in enumerate(params):

axis = EnumOrientation[axis]
img = image_stack(images, axis, stride, matte)
w, h = wihi
mode = EnumScaleMode[mode]
if mode != EnumScaleMode.NONE:
sample = EnumInterpolation[sample]
img = image_scalefit(img, w, h, mode, sample)
# images.append(cv2tensor_full(img, matte))
return cv2tensor_full(img, matte)
#pbar.update_absolute(idx)
return [torch.stack(i, dim=0).squeeze(1) for i in list(zip(*images))]

class CropNode(JOVBaseNode):
17 changes: 12 additions & 5 deletions core/create.py
@@ -20,7 +20,7 @@

from Jovimetrix.sup.image import EnumScaleMode, channel_solid, cv2tensor, cv2tensor_full, \
image_grayscale, image_invert, image_mask_add, \
image_rotate, image_scalefit, image_transform, image_translate, pil2cv, \
image_rotate, image_scalefit, image_stereogram, image_transform, image_translate, pil2cv, \
pixel_eval, tensor2cv, shape_ellipse, shape_polygon, shape_quad, \
EnumInterpolation, EnumEdge, EnumImageType, MIN_IMAGE_SIZE

@@ -188,6 +188,8 @@ class TextNode(JOVBaseNode):
FONTS = font_names()
FONT_NAMES = sorted(FONTS.keys())
DESCRIPTION = """
☣️💣☣️💣☣️💣☣️💣 THIS NODE IS A WORK IN PROGRESS ☣️💣☣️💣☣️💣☣️💣
The Text Generation node generates images containing text based on user-defined parameters such as font, size, alignment, color, and position. Users can input custom text messages, select fonts from a list of available options, adjust font size, and specify the alignment and justification of the text. Additionally, the node provides options for auto-sizing text to fit within specified dimensions, controlling letter-by-letter rendering, and applying edge effects such as clipping and inversion.
"""

@@ -312,6 +314,7 @@ def INPUT_TYPES(cls) -> dict:
Lexicon.NOISE: ("FLOAT", {"default": 0.33, "min": 0, "max": 1, "step": 0.01}),
Lexicon.GAMMA: ("FLOAT", {"default": 0.33, "min": 0, "max": 1, "step": 0.01}),
Lexicon.SHIFT: ("FLOAT", {"default": 1., "min": -1, "max": 1, "step": 0.01}),
Lexicon.INVERT: ("BOOLEAN", {"default": False}),
}}
return Lexicon._parse(d, cls)

@@ -322,13 +325,17 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
noise = parse_param(kw, Lexicon.NOISE, EnumConvertType.FLOAT, 1, 0)
gamma = parse_param(kw, Lexicon.GAMMA, EnumConvertType.FLOAT, 1, 0)
shift = parse_param(kw, Lexicon.SHIFT, EnumConvertType.FLOAT, 0, 1, -1)
params = list(zip_longest_fill(pA, depth, divisions, noise, gamma, shift))
invert = parse_param(kw, Lexicon.INVERT, EnumConvertType.BOOLEAN, False)
params = list(zip_longest_fill(pA, depth, divisions, noise, gamma, shift, invert))
images = []
pbar = ProgressBar(len(params))
for idx, (pA, depth, divisions, noise, gamma, shift) in enumerate(params):
pA = tensor2cv(pA) if pA is not None else channel_solid(chan=EnumImageType.BGRA)
for idx, (pA, depth, divisions, noise, gamma, shift, invert) in enumerate(params):
pA = channel_solid(chan=EnumImageType.BGRA) if pA is None else tensor2cv(pA)
h, w = pA.shape[:2]
depth = tensor2cv(depth) if depth is not None else channel_solid(w, h, chan=EnumImageType.BGRA)
depth = channel_solid(w, h, chan=EnumImageType.BGRA) if depth is None else tensor2cv(depth)
if invert:
depth = image_invert(depth, 1.0)
pA = image_stereogram(pA, depth, divisions, noise, gamma, shift)
images.append(cv2tensor_full(pA))
pbar.update_absolute(idx)
return [torch.stack(i, dim=0).squeeze(1) for i in list(zip(*images))]
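The new INVERT flag flips the depth map before image_stereogram runs; a rough sketch of the intent, assuming image_invert(depth, 1.0) amounts to a full-strength value inversion (the helper's exact semantics are not shown in this diff):

    # Hedged sketch: swap near and far planes of an 8-bit depth map.
    import numpy as np

    def invert_depth(depth: np.ndarray) -> np.ndarray:
        return 255 - depth   # assumption: full inversion, not confirmed by this commit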
10 changes: 4 additions & 6 deletions core/device_stream.py
@@ -375,12 +375,10 @@ def run(self, **kw) -> Tuple[torch.Tensor]:
for img in images:
# loop_time = time.perf_counter_ns()
w, h = wihi
img = tensor2cv(img) if img is not None else channel_solid(w, h, chan=EnumImageType.BGRA)
img = channel_solid(w, h, chan=EnumImageType.BGRA) if img is None else tensor2cv(img)
img = image_scalefit(img, w, h, mode, sample, matte)
# results.append(cv2tensor(img))
img[:, :, [0, 2]] = img[:, :, [2, 0]]
if len(img.shape) > 2 and img.shape[2] > 2:
img[:, :, [0, 2]] = img[:, :, [2, 0]]
self.__sender.frame = img
# delta = max(0, delta_desired - (time.perf_counter_ns() - loop_time))
# time.sleep(delta)
pbar.update_absolute(idx)
return () # [torch.stack(results, dim=0).squeeze(1)]
return ()
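The added shape test keeps the BGR↔RGB swap from fancy-indexing a frame that has no channel axis; a short sketch of the failure it avoids:

    # Hedged sketch of why the guard matters.
    import numpy as np

    gray = np.zeros((480, 640), dtype=np.uint8)      # no channel axis
    color = np.zeros((480, 640, 3), dtype=np.uint8)

    color[:, :, [0, 2]] = color[:, :, [2, 0]]        # fine: swaps B and R in place
    # gray[:, :, [0, 2]]                             # IndexError: too many indices

    if len(gray.shape) > 2 and gray.shape[2] > 2:    # same test as the diff
        gray[:, :, [0, 2]] = gray[:, :, [2, 0]]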
19 changes: 12 additions & 7 deletions core/utility.py
@@ -24,7 +24,7 @@
from comfy.utils import ProgressBar
from folder_paths import get_output_directory

from Jovimetrix import comfy_message, parse_reset, JOVBaseNode, \
from Jovimetrix import ROOT_COMFY, comfy_message, parse_reset, JOVBaseNode, \
WILDCARD, ROOT

from Jovimetrix.sup.lexicon import Lexicon
@@ -291,7 +291,7 @@ def process(q_data: str) -> Tuple[torch.Tensor, torch.Tensor] | str | dict:
self.__q = None
self.__index = 0

if (new_val := parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, self.__index))[0] > 0:
if (new_val := parse_param(kw, Lexicon.VALUE, EnumConvertType.INT, self.__index)[0]) > 0:
self.__index = new_val

if self.__q is None:
@@ -437,6 +437,8 @@ class ImageDiffNode(JOVBaseNode):
RETURN_NAMES = (Lexicon.IN_A, Lexicon.IN_B, Lexicon.DIFF, Lexicon.THRESHOLD)
SORT = 90
DESCRIPTION = """
☣️💣☣️💣☣️💣☣️💣 THIS NODE IS A WORK IN PROGRESS ☣️💣☣️💣☣️💣☣️💣
The Image Diff node compares two input images pixel by pixel to identify differences between them. It takes two images as input, labeled as Image A and Image B. The node then calculates the absolute difference between the two images, producing two additional outputs: a difference mask and a threshold mask. The threshold parameter determines the sensitivity of the comparison, with higher values indicating more tolerance for differences. The node returns Image A, Image B, the difference mask, and the threshold mask.
"""

@@ -607,9 +609,10 @@ class RouteNode(JOVBaseNode):
RETURN_TYPES = ()
SORT = 900
DESCRIPTION = """
☣️💣☣️💣☣️💣☣️💣 THIS NODE IS A WORK IN PROGRESS ☣️💣☣️💣☣️💣☣️💣
Routes the input data from the optional input ports to the output port, preserving the order of inputs. The `PASS_IN` optional input is directly passed through to the output, while other optional inputs are collected and returned as tuples, preserving the order of insertion.
"""
CATEGORY = "JOVIMETRIX 🔺🟩🔵/WIP ☣️💣"

@classmethod
def INPUT_TYPES(cls) -> dict:
@@ -641,7 +644,7 @@ def INPUT_TYPES(cls) -> None:
"fname": ("STRING", {"default": "output", "dynamicPrompts":False}),
"metadata": ("JSON", {}),
"usermeta": ("STRING", {"multiline": True, "dynamicPrompts":False,
"default": json.dumps({"extra": "data"})}),
"default": ""}),
},
"hidden": {
"prompt": "PROMPT",
@@ -663,11 +666,12 @@ def run(self, **kw) -> dict[str, Any]:
if image is None:
logger.warning("no image")
image = torch.zeros((32, 32, 4), dtype=torch.uint8, device="cpu")

try:
if not isinstance(usermeta, (dict,)):
usermeta = json.loads(usermeta)
metadata.update(usermeta)
except json.decoder.JSONDecodeError:
pass
except Exception as e:
logger.error(e)
logger.error(usermeta)
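With the default usermeta now an empty string, the decode failure is silently ignored instead of logged; a short sketch of the resulting behaviour:

    # Hedged sketch of the new usermeta handling.
    import json

    def merge_usermeta(metadata: dict, usermeta) -> dict:
        try:
            if not isinstance(usermeta, dict):
                usermeta = json.loads(usermeta)      # "" raises JSONDecodeError
            metadata.update(usermeta)
        except json.decoder.JSONDecodeError:
            pass                                     # blank or invalid user metadata
        return metadata

    merge_usermeta({"prompt": "x"}, "")                    # {'prompt': 'x'}
    merge_usermeta({"prompt": "x"}, '{"extra": "data"}')   # both keys present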
@@ -683,13 +687,14 @@ def run(self, **kw) -> dict[str, Any]:
except Exception as e:
logger.error(e)
logger.error(x)

if path == "" or path is None:
path = get_output_directory()
root = Path(path)
if not root.exists():
root = Path(get_output_directory())
root.mkdir(parents=True, exist_ok=True)
fname = (root / fname).with_suffix(".png")
logger.info(fname)
logger.info(f"wrote file: {fname}")
image.save(fname, pnginfo=meta_png)
pbar.update_absolute(idx)
return ()
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,7 +1,7 @@
[project]
name = "jovimetrix"
description = "Compose like Substance Designer. Webcams, Media Streams (in/out), Tick animation, Color correction, Geometry manipulation, Pixel shader, Polygonal shape generator, Remap images gometry and color, Heavily inspired by WAS and MTB Node Suites."
version = "1.0.2"
version = "1.0.3"
license = "LICENSE"
dependencies = ["torch", "numpy", "matplotlib", "opencv-contrib-python", "ffmpeg-python", "librosa", "loguru", "moderngl", "mss", "requests", "Pillow", "pywin32==306; platform_system==\"Windows\"", "scikit-image", "blendmodes", "mido[ports-rtmidi]", "pyaudio", "daltonlens", "numba", "PyOpenGL", "PyOpenGL-accelerate", "SpoutGL; platform_system==\"Windows\"", "vnoise", "stereoscopy[auto_align]", "aenum<4,>=3.1.15"]

8 changes: 5 additions & 3 deletions sup/audio.py
@@ -19,7 +19,7 @@

from loguru import logger

from Jovimetrix.sup.image import EnumScaleMode, image_scalefit, pil2cv, TYPE_PIXEL
from Jovimetrix.sup.image import EnumImageType, EnumScaleMode, image_scalefit, pil2cv, TYPE_PIXEL, pixel_convert, pixel_eval

# =============================================================================

@@ -70,14 +70,16 @@ def graph_sausage(data: np.ndarray, bar_count:int, width:int, height:int,
highest_line = max_array.max()
line_width = (width + bar_count) // bar_count
line_ratio = highest_line / height
image = Image.new('RGBA', (bar_count * line_width, height), color_back)
color_line = pixel_eval(color_line, EnumImageType.BGR)
color_back = pixel_eval(color_back, EnumImageType.BGR)
image = Image.new('RGBA', (bar_count * line_width, height), color_line)
draw = ImageDraw.Draw(image)
for i, item in enumerate(max_array):
item_height = item / line_ratio
current_x = int((i + offset) * line_width)
current_y = int((height - item_height) / 2)
draw.line((current_x, current_y, current_x, current_y + item_height),
fill=color_line, width=int(thickness * line_width))
fill=color_back, width=int(thickness * line_width))
image = pil2cv(image)
return image_scalefit(image, width, height, EnumScaleMode.FIT)

1 change: 1 addition & 0 deletions sup/image.py
@@ -1303,6 +1303,7 @@ def image_stack(images: List[TYPE_IMAGE], axis:EnumOrientation=EnumOrientation.H
stride = np.ceil(np.sqrt(count))
stride = int(stride)
stride = min(stride, count)
stride = max(stride, 1)

rows = []
for i in range(0, count, stride):
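The added clamp keeps the grid stride inside [1, count], which is what the commit note "min stride for array/grids" refers to; a worked check:

    # Hedged worked example of the grid stride clamp.
    import numpy as np

    def grid_stride(count: int) -> int:
        stride = int(np.ceil(np.sqrt(count)))
        stride = min(stride, count)
        stride = max(stride, 1)      # the line added in this commit
        return stride

    grid_stride(10)   # ceil(sqrt(10)) = 4 columns
    grid_stride(1)    # 1
    grid_stride(0)    # 1 -- without the clamp this would be 0 and
                      # range(0, count, stride) would raise ValueError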
11 changes: 10 additions & 1 deletion sup/util.py
@@ -107,7 +107,16 @@ def parse_value(val:Any, typ:EnumConvertType, default: Any,
new_val.append(v)
new_val = new_val[0] if size == 1 else tuple(new_val)
elif typ == EnumConvertType.DICT:
new_val = {i: v for i, v in enumerate(new_val)}
try:
if isinstance(new_val, (str,)):
try:
new_val = json.loads(new_val)
except json.decoder.JSONDecodeError:
new_val = {}
else:
new_val = {i: v for i, v in enumerate(new_val)}
except Exception as e:
logger.exception(e)
elif typ == EnumConvertType.LIST:
new_val = list(new_val)
elif typ == EnumConvertType.STRING:
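The DICT branch now accepts JSON strings as well as positional values ("DICT supported better in converters"); a hedged sketch of the conversion rules the new code implements:

    # Hedged sketch of the new DICT conversion behaviour in parse_value.
    import json

    def to_dict(val):
        if isinstance(val, str):
            try:
                return json.loads(val)               # '{"a": 1}' -> {'a': 1}
            except json.decoder.JSONDecodeError:
                return {}                            # malformed JSON -> empty dict
        return {i: v for i, v in enumerate(val)}     # (x, y) -> {0: x, 1: y}

    to_dict('{"size": 512}')   # {'size': 512}
    to_dict('not json')        # {}
    to_dict((1.0, 2.0))        # {0: 1.0, 1: 2.0}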
