diff --git a/core/calc.py b/core/calc.py index a5700c7..58f6a81 100644 --- a/core/calc.py +++ b/core/calc.py @@ -462,9 +462,9 @@ def run(self, **kw) -> Tuple[Any, Any]: # make sure we only interpolate between the longest "stride" we can size = min(3, max(len(A), len(B))) best_type = [EnumConvertType.FLOAT, EnumConvertType.VEC2, EnumConvertType.VEC3, EnumConvertType.VEC4][size] - A = parse_param(A, best_type, A) - B = parse_param(B, best_type, B) - alpha = parse_param(alpha, best_type, [alpha]) + A = parse_value(A, best_type, A) + B = parse_value(B, best_type, B) + alpha = parse_value(alpha, best_type, [alpha]) if op == "NONE": val = [B[x] * alpha[x] + A[x] * (1 - alpha[x]) for x in range(size)] else: diff --git a/core/create.py b/core/create.py index c551999..e893b31 100644 --- a/core/create.py +++ b/core/create.py @@ -347,7 +347,7 @@ def INPUT_TYPES(cls) -> dict: def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]: pA = parse_param(kw, Lexicon.PIXEL, EnumConvertType.IMAGE, None) - baseline = parse_param(kw, Lexicon.INT, 1, 0.1, EnumConvertType.FLOAT) + baseline = parse_param(kw, Lexicon.INT, EnumConvertType.FLOAT, 0, 0.1, 1) focal_length = parse_dynamic(Lexicon.VALUE, kw, EnumConvertType.FLOAT) images = [] params = list(zip_longest_fill(pA, baseline, focal_length)) diff --git a/core/utility.py b/core/utility.py index c19580a..0f8741e 100644 --- a/core/utility.py +++ b/core/utility.py @@ -373,10 +373,10 @@ def run(self, **kw) -> None: format = parse_param(kw, Lexicon.FORMAT, EnumConvertType.STRING, "gif")[0] overwrite = parse_param(kw, Lexicon.OVERWRITE, EnumConvertType.BOOLEAN, False)[0] optimize = parse_param(kw, Lexicon.OPTIMIZE, EnumConvertType.BOOLEAN, False)[0] - quality = parse_param(kw, Lexicon.QUALITY, 100, 0, EnumConvertType.INT, 1)[0] - motion = parse_param(kw, Lexicon.QUALITY_M, 100, 0, EnumConvertType.INT, 1)[0] - fps = parse_param(kw, Lexicon.FPS, 60, 24, EnumConvertType.INT, 1)[0] - loop = parse_param(kw, Lexicon.LOOP, 0, 0, 
EnumConvertType.INT)[0] + quality = parse_param(kw, Lexicon.QUALITY, EnumConvertType.INT, 100, 0, 1)[0] + motion = parse_param(kw, Lexicon.QUALITY_M, EnumConvertType.INT, 100, 0, 1)[0] + fps = parse_param(kw, Lexicon.FPS, EnumConvertType.INT, 60, 1, 24)[0] + loop = parse_param(kw, Lexicon.LOOP, EnumConvertType.INT, 0, 0)[0] output_dir = Path(output_dir) output_dir.mkdir(parents=True, exist_ok=True) @@ -538,7 +538,7 @@ def run(self, **kw) -> Tuple[int, list]: latents.append(False) if mode == EnumBatchMode.PICK: - index = parse_param(kw, Lexicon.BATCH_CHUNK, 0, 0, EnumConvertType.INT) + index = parse_param(kw, Lexicon.BATCH_CHUNK, EnumConvertType.INT, 0, 0)[0] index = index if index < len(extract) else -1 extract = [extract[index]] if latents[index]: @@ -564,7 +564,7 @@ def run(self, **kw) -> Tuple[int, list]: if latents[idx]: extract = {"samples": extract} elif mode == EnumBatchMode.INDEX_LIST: - indices = parse_param(kw, Lexicon.STRING, "", EnumConvertType.STRING).split(",") + indices = [int(i) for i in parse_param(kw, Lexicon.STRING, EnumConvertType.STRING, "")[0].split(",") if i.strip()] data = [extract[i:j] for i, j in zip([0]+indices, indices+[None])] latents = [latents[i:j] for i, j in zip([0]+indices, indices+[None])] extract = [] diff --git a/pyproject.toml b/pyproject.toml index acd550e..5d23e75 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [project] name = "jovimetrix" description = "Compose like Substance Designer. Webcams, Media Streams (in/out), Tick animation, Color correction, Geometry manipulation, Pixel shader, Polygonal shape generator, Remap images gometry and color, Heavily inspired by WAS and MTB Node Suites."
-version = "1.0.0" +version = "1.0.1" license = "LICENSE" dependencies = ["torch", "numpy", "matplotlib", "opencv-contrib-python", "ffmpeg-python", "librosa", "loguru", "moderngl", "mss", "requests", "Pillow", "pywin32==306; platform_system==\"Windows\"", "scikit-image", "blendmodes", "mido[ports-rtmidi]", "pyaudio", "daltonlens", "numba", "PyOpenGL", "PyOpenGL-accelerate", "SpoutGL; platform_system==\"Windows\"", "vnoise", "stereoscopy[auto_align]", "aenum<4,>=3.1.15"]