diff --git a/.gitattributes b/.gitattributes index f82e402795c9628aabf04940aded23562f0ba2d4..f1b6bb27bd211f9215743ebe24ed701271308ca2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -35,3 +35,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text input/tmpip5bak5z.png filter=lfs diff=lfs merge=lfs -text input/tmpvhslhwc_.png filter=lfs diff=lfs merge=lfs -text +custom_nodes/comfyui-kjnodes/docs/images/2024-04-03_20_49_29-ComfyUI.png filter=lfs diff=lfs merge=lfs -text +custom_nodes/comfyui-kjnodes/fonts/FreeMono.ttf filter=lfs diff=lfs merge=lfs -text +custom_nodes/comfyui-kjnodes/fonts/FreeMonoBoldOblique.otf filter=lfs diff=lfs merge=lfs -text +custom_nodes/comfyui-kjnodes/fonts/TTNorms-Black.otf filter=lfs diff=lfs merge=lfs -text diff --git a/custom_nodes/.DS_Store b/custom_nodes/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..cd8407d0e1dd11f197b605bbbecd44e7d9fddded Binary files /dev/null and b/custom_nodes/.DS_Store differ diff --git a/custom_nodes/ComfyUI-GGUF/.DS_Store b/custom_nodes/ComfyUI-GGUF/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..38734ca2de71d90578b12a191d5ff30a57f26d5c Binary files /dev/null and b/custom_nodes/ComfyUI-GGUF/.DS_Store differ diff --git a/custom_nodes/ComfyUI-GGUF/.github/workflows/registry.yaml b/custom_nodes/ComfyUI-GGUF/.github/workflows/registry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aaed69c2aa9af79a0c9a9459d971cd36bcfddc6e --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/.github/workflows/registry.yaml @@ -0,0 +1,21 @@ +name: ComfyUI Registry publish +on: + workflow_dispatch: + push: + branches: + - stable + paths: + - "pyproject.toml" + +jobs: + publish-node: + name: ComfyUI Registry publish + runs-on: ubuntu-latest + if: github.event.repository.fork == false + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Publish Custom Node + uses: Comfy-Org/publish-node-action@main + with: + personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} diff --git a/custom_nodes/ComfyUI-GGUF/.gitignore b/custom_nodes/ComfyUI-GGUF/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..44fe82fdbad306e07e27f784d6c906f0708956e2 --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/.gitignore @@ -0,0 +1,167 @@ +*.bin +*.gguf +*.safetensors +tools/llama.cpp* + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ diff --git a/custom_nodes/ComfyUI-GGUF/.tracking b/custom_nodes/ComfyUI-GGUF/.tracking new file mode 100644 index 0000000000000000000000000000000000000000..73b53b894d5e4f9e26bb1f191114e7222afcb156 --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/.tracking @@ -0,0 +1,17 @@ +.github/workflows/registry.yaml +.gitignore +LICENSE +README.md +__init__.py +dequant.py +loader.py +nodes.py +ops.py +pyproject.toml +requirements.txt +tools/README.md +tools/convert.py +tools/fix_5d_tensors.py +tools/fix_lines_ending.py +tools/lcpp.patch +tools/read_tensors.py \ No newline at end of file diff --git a/custom_nodes/ComfyUI-GGUF/LICENSE b/custom_nodes/ComfyUI-GGUF/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/custom_nodes/ComfyUI-GGUF/README.md b/custom_nodes/ComfyUI-GGUF/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6915927e1b61cc3c1113732e4e581e2af627a791
--- /dev/null
+++ b/custom_nodes/ComfyUI-GGUF/README.md
@@ -0,0 +1,49 @@
+# ComfyUI-GGUF
+GGUF Quantization support for native ComfyUI models
+
+This is currently very much WIP. These custom nodes provide support for model files stored in the GGUF format popularized by [llama.cpp](https://github.com/ggerganov/llama.cpp).
+
+While quantization wasn't feasible for regular UNET models (conv2d), transformer/DiT models such as flux seem less affected by quantization. This allows running them at much lower bits per weight (variable bitrate quants) on low-end GPUs. For further VRAM savings, a node to load a quantized version of the T5 text encoder is also included.
+
+![Comfy_Flux1_dev_Q4_0_GGUF_1024](https://github.com/user-attachments/assets/70d16d97-c522-4ef4-9435-633f128644c8)
+
+Note: The "Force/Set CLIP Device" node is **NOT** part of this node pack. Do not install it if you only have one GPU. Do not set it to cuda:0 and then complain about OOM errors if you do not understand what it is for. There is no need to copy the workflow above; just use your own workflow and replace the stock "Load Diffusion Model" node with the "Unet Loader (GGUF)" node.
+
+## Installation
+
+> [!IMPORTANT]
+> Make sure your ComfyUI is on a recent-enough version to support custom ops when loading the UNET on its own.
+
+To install the custom node normally, git clone this repository into your custom nodes folder (`ComfyUI/custom_nodes`) and install the only dependency needed for inference (`pip install --upgrade gguf`):
+
+```
+git clone https://github.com/city96/ComfyUI-GGUF
+```
+
+To install the custom node on a standalone ComfyUI release, open a CMD inside the "ComfyUI_windows_portable" folder (where your `run_nvidia_gpu.bat` file is) and use the following commands:
+
+```
+git clone https://github.com/city96/ComfyUI-GGUF ComfyUI/custom_nodes/ComfyUI-GGUF
+.\python_embeded\python.exe -s -m pip install -r .\ComfyUI\custom_nodes\ComfyUI-GGUF\requirements.txt
+```
+
+On macOS Sequoia, torch 2.4.1 seems to be required, as the 2.6.x nightly versions cause a "M1 buffer is not large enough" error. See [this issue](https://github.com/city96/ComfyUI-GGUF/issues/107) for more information and workarounds.
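If you want to confirm that the `gguf` dependency is visible to the Python that actually runs ComfyUI (a common source of import errors on the portable build), a quick check like the one below can help. This is an illustrative editor's snippet, not part of the node pack; run it with the same interpreter you launch ComfyUI with (e.g. `.\python_embeded\python.exe` on the portable release).

```
# Illustrative check only: verify the gguf package used by these nodes is importable.
import gguf
print("gguf version:", getattr(gguf, "__version__", "unknown"))
print("GGUFReader available:", hasattr(gguf, "GGUFReader"))
```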
+
+## Usage
+
+Simply use the GGUF Unet loader found under the `bootleg` category. Place the .gguf model files in your `ComfyUI/models/unet` folder.
+
+LoRA loading is experimental, but it should work with just the built-in LoRA loader node(s).
+
+Pre-quantized models:
+
+- [flux1-dev GGUF](https://huggingface.co/city96/FLUX.1-dev-gguf)
+- [flux1-schnell GGUF](https://huggingface.co/city96/FLUX.1-schnell-gguf)
+- [stable-diffusion-3.5-large GGUF](https://huggingface.co/city96/stable-diffusion-3.5-large-gguf)
+- [stable-diffusion-3.5-large-turbo GGUF](https://huggingface.co/city96/stable-diffusion-3.5-large-turbo-gguf)
+
+Initial support for quantizing T5 has also been added recently; the resulting files can be loaded with the various `*CLIPLoader (gguf)` nodes, which can be used in place of the regular ones. For the CLIP model, use whatever model you were using before. The loader can handle both types of files - `gguf` and regular `safetensors`/`bin`.
+
+- [t5_v1.1-xxl GGUF](https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf)
+
+See the instructions in the [tools](https://github.com/city96/ComfyUI-GGUF/tree/main/tools) folder for how to create your own quants.
diff --git a/custom_nodes/ComfyUI-GGUF/__init__.py b/custom_nodes/ComfyUI-GGUF/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a03726e3b0a08957ded67cdd21beb9544a3f6e4d
--- /dev/null
+++ b/custom_nodes/ComfyUI-GGUF/__init__.py
@@ -0,0 +1,9 @@
+# only import if running as a custom node
+try:
+    import comfy.utils
+except ImportError:
+    pass
+else:
+    from .nodes import NODE_CLASS_MAPPINGS
+    NODE_DISPLAY_NAME_MAPPINGS = {k:v.TITLE for k,v in NODE_CLASS_MAPPINGS.items()}
+    __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
diff --git a/custom_nodes/ComfyUI-GGUF/dequant.py b/custom_nodes/ComfyUI-GGUF/dequant.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e545b72c9bc2209c5ee0ae1cc687a905f4a3dc8
--- /dev/null
+++ b/custom_nodes/ComfyUI-GGUF/dequant.py
@@ -0,0 +1,248 @@
+# (c) City96 || Apache-2.0 (apache.org/licenses/LICENSE-2.0)
+import gguf
+import torch
+from tqdm import tqdm
+
+
+TORCH_COMPATIBLE_QTYPES = (None, gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16)
+
+def is_torch_compatible(tensor):
+    return tensor is None or getattr(tensor, "tensor_type", None) in TORCH_COMPATIBLE_QTYPES
+
+def is_quantized(tensor):
+    return not is_torch_compatible(tensor)
+
+def dequantize_tensor(tensor, dtype=None, dequant_dtype=None):
+    qtype = getattr(tensor, "tensor_type", None)
+    oshape = getattr(tensor, "tensor_shape", tensor.shape)
+
+    if qtype in TORCH_COMPATIBLE_QTYPES:
+        return tensor.to(dtype)
+    elif qtype in dequantize_functions:
+        dequant_dtype = dtype if dequant_dtype == "target" else dequant_dtype
+        return dequantize(tensor.data, qtype, oshape, dtype=dequant_dtype).to(dtype)
+    else:
+        # this is incredibly slow
+        tqdm.write(f"Falling back to numpy dequant for qtype: {qtype}")
+        new = gguf.quants.dequantize(tensor.cpu().numpy(), qtype)
+        return torch.from_numpy(new).to(tensor.device, dtype=dtype)
+
+def dequantize(data, qtype, oshape, dtype=None):
+    """
+    Dequantize tensor back to usable shape/dtype
+    """
+    block_size, type_size = gguf.GGML_QUANT_SIZES[qtype]
+    dequantize_blocks = dequantize_functions[qtype]
+
+    rows = data.reshape(
+        (-1, data.shape[-1])
+    ).view(torch.uint8)
+
+    n_blocks = rows.numel() // type_size
+    blocks = rows.reshape((n_blocks, type_size))
+    blocks = dequantize_blocks(blocks, block_size, type_size, dtype)
+    return blocks.reshape(oshape)
+
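# ----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the diff): a hand-built Q8_0 block
# showing the raw layout the dequantize() pipeline above operates on. Each
# Q8_0 block is 34 bytes: a 2-byte float16 scale `d` followed by 32 int8
# quants, and dequantization is simply `d * q` (compare dequantize_blocks_Q8_0
# further down in this file). The values here are made up for demonstration.
# ----------------------------------------------------------------------------
def _q8_0_layout_demo():
    import torch
    d = torch.tensor([0.5], dtype=torch.float16)                   # per-block scale
    q = torch.arange(-16, 16, dtype=torch.int8)                    # 32 quantized values
    block = torch.cat([d.view(torch.uint8), q.view(torch.uint8)])  # 34 raw bytes
    scale = block[:2].view(torch.float16).to(torch.float32)        # recover the scale
    vals = block[2:].view(torch.int8).to(torch.float32)            # recover the quants
    return scale * vals                                            # 0.5 * [-16 .. 15]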
+def to_uint32(x): + # no uint32 :( + x = x.view(torch.uint8).to(torch.int32) + return (x[:, 0] | x[:, 1] << 8 | x[:, 2] << 16 | x[:, 3] << 24).unsqueeze(1) + +def split_block_dims(blocks, *args): + n_max = blocks.shape[1] + dims = list(args) + [n_max - sum(args)] + return torch.split(blocks, dims, dim=1) + +# Full weights # +def dequantize_blocks_BF16(blocks, block_size, type_size, dtype=None): + return (blocks.view(torch.int16).to(torch.int32) << 16).view(torch.float32) + +# Legacy Quants # +def dequantize_blocks_Q8_0(blocks, block_size, type_size, dtype=None): + d, x = split_block_dims(blocks, 2) + d = d.view(torch.float16).to(dtype) + x = x.view(torch.int8) + return (d * x) + +def dequantize_blocks_Q5_1(blocks, block_size, type_size, dtype=None): + n_blocks = blocks.shape[0] + + d, m, qh, qs = split_block_dims(blocks, 2, 2, 4) + d = d.view(torch.float16).to(dtype) + m = m.view(torch.float16).to(dtype) + qh = to_uint32(qh) + + qh = qh.reshape((n_blocks, 1)) >> torch.arange(32, device=d.device, dtype=torch.int32).reshape(1, 32) + ql = qs.reshape((n_blocks, -1, 1, block_size // 2)) >> torch.tensor([0, 4], device=d.device, dtype=torch.uint8).reshape(1, 1, 2, 1) + qh = (qh & 1).to(torch.uint8) + ql = (ql & 0x0F).reshape((n_blocks, -1)) + + qs = (ql | (qh << 4)) + return (d * qs) + m + +def dequantize_blocks_Q5_0(blocks, block_size, type_size, dtype=None): + n_blocks = blocks.shape[0] + + d, qh, qs = split_block_dims(blocks, 2, 4) + d = d.view(torch.float16).to(dtype) + qh = to_uint32(qh) + + qh = qh.reshape(n_blocks, 1) >> torch.arange(32, device=d.device, dtype=torch.int32).reshape(1, 32) + ql = qs.reshape(n_blocks, -1, 1, block_size // 2) >> torch.tensor([0, 4], device=d.device, dtype=torch.uint8).reshape(1, 1, 2, 1) + + qh = (qh & 1).to(torch.uint8) + ql = (ql & 0x0F).reshape(n_blocks, -1) + + qs = (ql | (qh << 4)).to(torch.int8) - 16 + return (d * qs) + +def dequantize_blocks_Q4_1(blocks, block_size, type_size, dtype=None): + n_blocks = blocks.shape[0] + + d, m, qs = split_block_dims(blocks, 2, 2) + d = d.view(torch.float16).to(dtype) + m = m.view(torch.float16).to(dtype) + + qs = qs.reshape((n_blocks, -1, 1, block_size // 2)) >> torch.tensor([0, 4], device=d.device, dtype=torch.uint8).reshape(1, 1, 2, 1) + qs = (qs & 0x0F).reshape(n_blocks, -1) + + return (d * qs) + m + +def dequantize_blocks_Q4_0(blocks, block_size, type_size, dtype=None): + n_blocks = blocks.shape[0] + + d, qs = split_block_dims(blocks, 2) + d = d.view(torch.float16).to(dtype) + + qs = qs.reshape((n_blocks, -1, 1, block_size // 2)) >> torch.tensor([0, 4], device=d.device, dtype=torch.uint8).reshape((1, 1, 2, 1)) + qs = (qs & 0x0F).reshape((n_blocks, -1)).to(torch.int8) - 8 + return (d * qs) + +# K Quants # +QK_K = 256 +K_SCALE_SIZE = 12 + +def get_scale_min(scales): + n_blocks = scales.shape[0] + scales = scales.view(torch.uint8) + scales = scales.reshape((n_blocks, 3, 4)) + + d, m, m_d = torch.split(scales, scales.shape[-2] // 3, dim=-2) + + sc = torch.cat([d & 0x3F, (m_d & 0x0F) | ((d >> 2) & 0x30)], dim=-1) + min = torch.cat([m & 0x3F, (m_d >> 4) | ((m >> 2) & 0x30)], dim=-1) + + return (sc.reshape((n_blocks, 8)), min.reshape((n_blocks, 8))) + +def dequantize_blocks_Q6_K(blocks, block_size, type_size, dtype=None): + n_blocks = blocks.shape[0] + + ql, qh, scales, d, = split_block_dims(blocks, QK_K // 2, QK_K // 4, QK_K // 16) + + scales = scales.view(torch.int8).to(dtype) + d = d.view(torch.float16).to(dtype) + d = (d * scales).reshape((n_blocks, QK_K // 16, 1)) + + ql = ql.reshape((n_blocks, -1, 1, 64)) >> 
torch.tensor([0, 4], device=d.device, dtype=torch.uint8).reshape((1, 1, 2, 1)) + ql = (ql & 0x0F).reshape((n_blocks, -1, 32)) + qh = qh.reshape((n_blocks, -1, 1, 32)) >> torch.tensor([0, 2, 4, 6], device=d.device, dtype=torch.uint8).reshape((1, 1, 4, 1)) + qh = (qh & 0x03).reshape((n_blocks, -1, 32)) + q = (ql | (qh << 4)).to(torch.int8) - 32 + q = q.reshape((n_blocks, QK_K // 16, -1)) + + return (d * q).reshape((n_blocks, QK_K)) + +def dequantize_blocks_Q5_K(blocks, block_size, type_size, dtype=None): + n_blocks = blocks.shape[0] + + d, dmin, scales, qh, qs = split_block_dims(blocks, 2, 2, K_SCALE_SIZE, QK_K // 8) + + d = d.view(torch.float16).to(dtype) + dmin = dmin.view(torch.float16).to(dtype) + + sc, m = get_scale_min(scales) + + d = (d * sc).reshape((n_blocks, -1, 1)) + dm = (dmin * m).reshape((n_blocks, -1, 1)) + + ql = qs.reshape((n_blocks, -1, 1, 32)) >> torch.tensor([0, 4], device=d.device, dtype=torch.uint8).reshape((1, 1, 2, 1)) + qh = qh.reshape((n_blocks, -1, 1, 32)) >> torch.tensor([i for i in range(8)], device=d.device, dtype=torch.uint8).reshape((1, 1, 8, 1)) + ql = (ql & 0x0F).reshape((n_blocks, -1, 32)) + qh = (qh & 0x01).reshape((n_blocks, -1, 32)) + q = (ql | (qh << 4)) + + return (d * q - dm).reshape((n_blocks, QK_K)) + +def dequantize_blocks_Q4_K(blocks, block_size, type_size, dtype=None): + n_blocks = blocks.shape[0] + + d, dmin, scales, qs = split_block_dims(blocks, 2, 2, K_SCALE_SIZE) + d = d.view(torch.float16).to(dtype) + dmin = dmin.view(torch.float16).to(dtype) + + sc, m = get_scale_min(scales) + + d = (d * sc).reshape((n_blocks, -1, 1)) + dm = (dmin * m).reshape((n_blocks, -1, 1)) + + qs = qs.reshape((n_blocks, -1, 1, 32)) >> torch.tensor([0, 4], device=d.device, dtype=torch.uint8).reshape((1, 1, 2, 1)) + qs = (qs & 0x0F).reshape((n_blocks, -1, 32)) + + return (d * qs - dm).reshape((n_blocks, QK_K)) + +def dequantize_blocks_Q3_K(blocks, block_size, type_size, dtype=None): + n_blocks = blocks.shape[0] + + hmask, qs, scales, d = split_block_dims(blocks, QK_K // 8, QK_K // 4, 12) + d = d.view(torch.float16).to(dtype) + + lscales, hscales = scales[:, :8], scales[:, 8:] + lscales = lscales.reshape((n_blocks, 1, 8)) >> torch.tensor([0, 4], device=d.device, dtype=torch.uint8).reshape((1, 2, 1)) + lscales = lscales.reshape((n_blocks, 16)) + hscales = hscales.reshape((n_blocks, 1, 4)) >> torch.tensor([0, 2, 4, 6], device=d.device, dtype=torch.uint8).reshape((1, 4, 1)) + hscales = hscales.reshape((n_blocks, 16)) + scales = (lscales & 0x0F) | ((hscales & 0x03) << 4) + scales = (scales.to(torch.int8) - 32) + + dl = (d * scales).reshape((n_blocks, 16, 1)) + + ql = qs.reshape((n_blocks, -1, 1, 32)) >> torch.tensor([0, 2, 4, 6], device=d.device, dtype=torch.uint8).reshape((1, 1, 4, 1)) + qh = hmask.reshape(n_blocks, -1, 1, 32) >> torch.tensor([i for i in range(8)], device=d.device, dtype=torch.uint8).reshape((1, 1, 8, 1)) + ql = ql.reshape((n_blocks, 16, QK_K // 16)) & 3 + qh = (qh.reshape((n_blocks, 16, QK_K // 16)) & 1) ^ 1 + q = (ql.to(torch.int8) - (qh << 2).to(torch.int8)) + + return (dl * q).reshape((n_blocks, QK_K)) + +def dequantize_blocks_Q2_K(blocks, block_size, type_size, dtype=None): + n_blocks = blocks.shape[0] + + scales, qs, d, dmin = split_block_dims(blocks, QK_K // 16, QK_K // 4, 2) + d = d.view(torch.float16).to(dtype) + dmin = dmin.view(torch.float16).to(dtype) + + # (n_blocks, 16, 1) + dl = (d * (scales & 0xF)).reshape((n_blocks, QK_K // 16, 1)) + ml = (dmin * (scales >> 4)).reshape((n_blocks, QK_K // 16, 1)) + + shift = torch.tensor([0, 2, 4, 6], 
device=d.device, dtype=torch.uint8).reshape((1, 1, 4, 1)) + + qs = (qs.reshape((n_blocks, -1, 1, 32)) >> shift) & 3 + qs = qs.reshape((n_blocks, QK_K // 16, 16)) + qs = dl * qs - ml + + return qs.reshape((n_blocks, -1)) + +dequantize_functions = { + gguf.GGMLQuantizationType.BF16: dequantize_blocks_BF16, + gguf.GGMLQuantizationType.Q8_0: dequantize_blocks_Q8_0, + gguf.GGMLQuantizationType.Q5_1: dequantize_blocks_Q5_1, + gguf.GGMLQuantizationType.Q5_0: dequantize_blocks_Q5_0, + gguf.GGMLQuantizationType.Q4_1: dequantize_blocks_Q4_1, + gguf.GGMLQuantizationType.Q4_0: dequantize_blocks_Q4_0, + gguf.GGMLQuantizationType.Q6_K: dequantize_blocks_Q6_K, + gguf.GGMLQuantizationType.Q5_K: dequantize_blocks_Q5_K, + gguf.GGMLQuantizationType.Q4_K: dequantize_blocks_Q4_K, + gguf.GGMLQuantizationType.Q3_K: dequantize_blocks_Q3_K, + gguf.GGMLQuantizationType.Q2_K: dequantize_blocks_Q2_K, +} diff --git a/custom_nodes/ComfyUI-GGUF/loader.py b/custom_nodes/ComfyUI-GGUF/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..fd35e13441d87c0f7a3637d6f0581d4e8f0fc1c1 --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/loader.py @@ -0,0 +1,353 @@ +# (c) City96 || Apache-2.0 (apache.org/licenses/LICENSE-2.0) +import warnings +import logging +import torch +import gguf +import re +import os + +from .ops import GGMLTensor +from .dequant import is_quantized, dequantize_tensor + +IMG_ARCH_LIST = {"flux", "sd1", "sdxl", "sd3", "aura", "hidream", "cosmos", "ltxv", "hyvid", "wan", "lumina2", "qwen_image"} +TXT_ARCH_LIST = {"t5", "t5encoder", "llama", "qwen2vl"} +VIS_TYPE_LIST = {"clip-vision"} + +def get_orig_shape(reader, tensor_name): + field_key = f"comfy.gguf.orig_shape.{tensor_name}" + field = reader.get_field(field_key) + if field is None: + return None + # Has original shape metadata, so we try to decode it. 
+ if len(field.types) != 2 or field.types[0] != gguf.GGUFValueType.ARRAY or field.types[1] != gguf.GGUFValueType.INT32: + raise TypeError(f"Bad original shape metadata for {field_key}: Expected ARRAY of INT32, got {field.types}") + return torch.Size(tuple(int(field.parts[part_idx][0]) for part_idx in field.data)) + +def get_field(reader, field_name, field_type): + field = reader.get_field(field_name) + if field is None: + return None + elif field_type == str: + # extra check here as this is used for checking arch string + if len(field.types) != 1 or field.types[0] != gguf.GGUFValueType.STRING: + raise TypeError(f"Bad type for GGUF {field_name} key: expected string, got {field.types!r}") + return str(field.parts[field.data[-1]], encoding="utf-8") + elif field_type in [int, float, bool]: + return field_type(field.parts[field.data[-1]]) + else: + raise TypeError(f"Unknown field type {field_type}") + +def get_list_field(reader, field_name, field_type): + field = reader.get_field(field_name) + if field is None: + return None + elif field_type == str: + return tuple(str(field.parts[part_idx], encoding="utf-8") for part_idx in field.data) + elif field_type in [int, float, bool]: + return tuple(field_type(field.parts[part_idx][0]) for part_idx in field.data) + else: + raise TypeError(f"Unknown field type {field_type}") + +def gguf_sd_loader(path, handle_prefix="model.diffusion_model.", return_arch=False, is_text_model=False): + """ + Read state dict as fake tensors + """ + reader = gguf.GGUFReader(path) + + # filter and strip prefix + has_prefix = False + if handle_prefix is not None: + prefix_len = len(handle_prefix) + tensor_names = set(tensor.name for tensor in reader.tensors) + has_prefix = any(s.startswith(handle_prefix) for s in tensor_names) + + tensors = [] + for tensor in reader.tensors: + sd_key = tensor_name = tensor.name + if has_prefix: + if not tensor_name.startswith(handle_prefix): + continue + sd_key = tensor_name[prefix_len:] + tensors.append((sd_key, tensor)) + + # detect and verify architecture + compat = None + arch_str = get_field(reader, "general.architecture", str) + type_str = get_field(reader, "general.type", str) + if arch_str in [None, "pig"]: + if is_text_model: + raise ValueError(f"This text model is incompatible with llama.cpp!\nConsider using the safetensors version\n({path})") + compat = "sd.cpp" if arch_str is None else arch_str + # import here to avoid changes to convert.py breaking regular models + from .tools.convert import detect_arch + try: + arch_str = detect_arch(set(val[0] for val in tensors)).arch + except Exception as e: + raise ValueError(f"This model is not currently supported - ({e})") + elif arch_str not in TXT_ARCH_LIST and is_text_model: + if type_str not in VIS_TYPE_LIST: + raise ValueError(f"Unexpected text model architecture type in GGUF file: {arch_str!r}") + elif arch_str not in IMG_ARCH_LIST and not is_text_model: + raise ValueError(f"Unexpected architecture type in GGUF file: {arch_str!r}") + + if compat: + logging.warning(f"Warning: This gguf model file is loaded in compatibility mode '{compat}' [arch:{arch_str}]") + + # main loading loop + state_dict = {} + qtype_dict = {} + for sd_key, tensor in tensors: + tensor_name = tensor.name + # torch_tensor = torch.from_numpy(tensor.data) # mmap + + # NOTE: line above replaced with this block to avoid persistent numpy warning about mmap + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", message="The given NumPy array is not writable") + torch_tensor = 
torch.from_numpy(tensor.data) # mmap + + shape = get_orig_shape(reader, tensor_name) + if shape is None: + shape = torch.Size(tuple(int(v) for v in reversed(tensor.shape))) + # Workaround for stable-diffusion.cpp SDXL detection. + if compat == "sd.cpp" and arch_str == "sdxl": + if any([tensor_name.endswith(x) for x in (".proj_in.weight", ".proj_out.weight")]): + while len(shape) > 2 and shape[-1] == 1: + shape = shape[:-1] + + # add to state dict + if tensor.tensor_type in {gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16}: + torch_tensor = torch_tensor.view(*shape) + state_dict[sd_key] = GGMLTensor(torch_tensor, tensor_type=tensor.tensor_type, tensor_shape=shape) + + # keep track of loaded tensor types + tensor_type_str = getattr(tensor.tensor_type, "name", repr(tensor.tensor_type)) + qtype_dict[tensor_type_str] = qtype_dict.get(tensor_type_str, 0) + 1 + + # print loaded tensor type counts + logging.info("gguf qtypes: " + ", ".join(f"{k} ({v})" for k, v in qtype_dict.items())) + + # mark largest tensor for vram estimation + qsd = {k:v for k,v in state_dict.items() if is_quantized(v)} + if len(qsd) > 0: + max_key = max(qsd.keys(), key=lambda k: qsd[k].numel()) + state_dict[max_key].is_largest_weight = True + + if return_arch: + return (state_dict, arch_str) + return state_dict + +# for remapping llama.cpp -> original key names +T5_SD_MAP = { + "enc.": "encoder.", + ".blk.": ".block.", + "token_embd": "shared", + "output_norm": "final_layer_norm", + "attn_q": "layer.0.SelfAttention.q", + "attn_k": "layer.0.SelfAttention.k", + "attn_v": "layer.0.SelfAttention.v", + "attn_o": "layer.0.SelfAttention.o", + "attn_norm": "layer.0.layer_norm", + "attn_rel_b": "layer.0.SelfAttention.relative_attention_bias", + "ffn_up": "layer.1.DenseReluDense.wi_1", + "ffn_down": "layer.1.DenseReluDense.wo", + "ffn_gate": "layer.1.DenseReluDense.wi_0", + "ffn_norm": "layer.1.layer_norm", +} + +LLAMA_SD_MAP = { + "blk.": "model.layers.", + "attn_norm": "input_layernorm", + "attn_q": "self_attn.q_proj", + "attn_k": "self_attn.k_proj", + "attn_v": "self_attn.v_proj", + "attn_output": "self_attn.o_proj", + "ffn_up": "mlp.up_proj", + "ffn_down": "mlp.down_proj", + "ffn_gate": "mlp.gate_proj", + "ffn_norm": "post_attention_layernorm", + "token_embd": "model.embed_tokens", + "output_norm": "model.norm", + "output.weight": "lm_head.weight", +} + +CLIP_VISION_SD_MAP = { + "mm.": "visual.merger.mlp.", + "v.post_ln.": "visual.merger.ln_q.", + "v.patch_embd": "visual.patch_embed.proj", + "v.blk.": "visual.blocks.", + "ffn_up": "mlp.up_proj", + "ffn_down": "mlp.down_proj", + "ffn_gate": "mlp.gate_proj", + "attn_out.": "attn.proj.", + "ln1.": "norm1.", + "ln2.": "norm2.", +} + +def sd_map_replace(raw_sd, key_map): + sd = {} + for k,v in raw_sd.items(): + for s,d in key_map.items(): + k = k.replace(s,d) + sd[k] = v + return sd + +def llama_permute(raw_sd, n_head, n_head_kv): + # Reverse version of LlamaModel.permute in llama.cpp convert script + sd = {} + permute = lambda x,h: x.reshape(h, x.shape[0] // h // 2, 2, *x.shape[1:]).swapaxes(1, 2).reshape(x.shape) + for k,v in raw_sd.items(): + if k.endswith(("q_proj.weight", "q_proj.bias")): + v.data = permute(v.data, n_head) + if k.endswith(("k_proj.weight", "k_proj.bias")): + v.data = permute(v.data, n_head_kv) + sd[k] = v + return sd + +def strip_quant_suffix(name): + pattern = r"[-_]?(?:ud-)?i?q[0-9]_[a-z0-9_\-]{1,8}$" + match = re.search(pattern, name, re.IGNORECASE) + if match: + name = name[:match.start()] + return name + +def gguf_mmproj_loader(path): + # Reverse 
version of Qwen2VLVisionModel.modify_tensors
+    logging.info("Attempting to find mmproj file for text encoder...")
+
+    # get name to match w/o quant suffix
+    tenc_fname = os.path.basename(path)
+    tenc = os.path.splitext(tenc_fname)[0].lower()
+    tenc = strip_quant_suffix(tenc)
+
+    # try and find matching mmproj
+    target = []
+    root = os.path.dirname(path)
+    for fname in os.listdir(root):
+        name, ext = os.path.splitext(fname)
+        if ext.lower() != ".gguf":
+            continue
+        if "mmproj" not in name.lower():
+            continue
+        if tenc in name.lower():
+            target.append(fname)
+
+    if len(target) == 0:
+        logging.error(f"Error: Can't find mmproj file for '{tenc_fname}' (matching:'{tenc}')! Qwen-Image-Edit will be broken!")
+        return {}
+    if len(target) > 1:
+        logging.error(f"Ambiguous mmproj for text encoder '{tenc_fname}', will use first match.")
+
+    logging.info(f"Using mmproj '{target[0]}' for text encoder '{tenc_fname}'.")
+    target = os.path.join(root, target[0])
+    vsd = gguf_sd_loader(target, is_text_model=True)
+
+    # concat 4D to 5D
+    if "v.patch_embd.weight.1" in vsd:
+        w1 = dequantize_tensor(vsd.pop("v.patch_embd.weight"), dtype=torch.float32)
+        w2 = dequantize_tensor(vsd.pop("v.patch_embd.weight.1"), dtype=torch.float32)
+        vsd["v.patch_embd.weight"] = torch.stack([w1, w2], dim=2)
+
+    # run main replacement
+    vsd = sd_map_replace(vsd, CLIP_VISION_SD_MAP)
+
+    # handle split Q/K/V
+    if "visual.blocks.0.attn_q.weight" in vsd:
+        attns = {}
+        # filter out attentions + group
+        for k,v in vsd.items():
+            if any(x in k for x in ["attn_q", "attn_k", "attn_v"]):
+                k_attn, k_name = k.rsplit(".attn_", 1)
+                k_attn += ".attn.qkv." + k_name.split(".")[-1]
+                if k_attn not in attns:
+                    attns[k_attn] = {}
+                attns[k_attn][k_name] = dequantize_tensor(
+                    v, dtype=(torch.bfloat16 if is_quantized(v) else torch.float16)
+                )
+
+        # recombine
+        for k,v in attns.items():
+            suffix = k.split(".")[-1]
+            vsd[k] = torch.cat([
+                v[f"q.{suffix}"],
+                v[f"k.{suffix}"],
+                v[f"v.{suffix}"],
+            ], dim=0)
+        del attns
+
+    return vsd
+
+def gguf_tokenizer_loader(path, temb_shape):
+    # convert gguf tokenizer to spiece
+    logging.info("Attempting to recreate sentencepiece tokenizer from GGUF file metadata...")
+    try:
+        from sentencepiece import sentencepiece_model_pb2 as model
+    except ImportError:
+        raise ImportError("Please make sure sentencepiece and protobuf are installed.\npip install sentencepiece protobuf")
+    spm = model.ModelProto()
+
+    reader = gguf.GGUFReader(path)
+
+    if get_field(reader, "tokenizer.ggml.model", str) == "t5":
+        if temb_shape == (256384, 4096): # probably UMT5
+            spm.trainer_spec.model_type = 1 # Unigram (do we have a T5 w/ BPE?)
+        else:
+            raise NotImplementedError("Unknown model, can't set tokenizer!")
+    else:
+        raise NotImplementedError("Unknown model, can't set tokenizer!")
+
+    spm.normalizer_spec.add_dummy_prefix = get_field(reader, "tokenizer.ggml.add_space_prefix", bool)
+    spm.normalizer_spec.remove_extra_whitespaces = get_field(reader, "tokenizer.ggml.remove_extra_whitespaces", bool)
+
+    tokens = get_list_field(reader, "tokenizer.ggml.tokens", str)
+    scores = get_list_field(reader, "tokenizer.ggml.scores", float)
+    toktypes = get_list_field(reader, "tokenizer.ggml.token_type", int)
+
+    for idx, (token, score, toktype) in enumerate(zip(tokens, scores, toktypes)):
+        # # These aren't present in the original?
+ # if toktype == 5 and idx >= temb_shape[0]%1000): + # continue + + piece = spm.SentencePiece() + piece.piece = token + piece.score = score + piece.type = toktype + spm.pieces.append(piece) + + # unsure if any of these are correct + spm.trainer_spec.byte_fallback = True + spm.trainer_spec.vocab_size = len(tokens) # split off unused? + spm.trainer_spec.max_sentence_length = 4096 + spm.trainer_spec.eos_id = get_field(reader, "tokenizer.ggml.eos_token_id", int) + spm.trainer_spec.pad_id = get_field(reader, "tokenizer.ggml.padding_token_id", int) + + logging.info(f"Created tokenizer with vocab size of {len(spm.pieces)}") + del reader + return torch.ByteTensor(list(spm.SerializeToString())) + +def gguf_clip_loader(path): + sd, arch = gguf_sd_loader(path, return_arch=True, is_text_model=True) + if arch in {"t5", "t5encoder"}: + temb_key = "token_embd.weight" + if temb_key in sd and sd[temb_key].shape == (256384, 4096): + # non-standard Comfy-Org tokenizer + sd["spiece_model"] = gguf_tokenizer_loader(path, sd[temb_key].shape) + # TODO: dequantizing token embed here is janky but otherwise we OOM due to tensor being massive. + logging.warning(f"Dequantizing {temb_key} to prevent runtime OOM.") + sd[temb_key] = dequantize_tensor(sd[temb_key], dtype=torch.float16) + sd = sd_map_replace(sd, T5_SD_MAP) + elif arch in {"llama", "qwen2vl"}: + # TODO: pass model_options["vocab_size"] to loader somehow + temb_key = "token_embd.weight" + if temb_key in sd and sd[temb_key].shape[0] >= (64 * 1024): + # See note above for T5. + logging.warning(f"Dequantizing {temb_key} to prevent runtime OOM.") + sd[temb_key] = dequantize_tensor(sd[temb_key], dtype=torch.float16) + sd = sd_map_replace(sd, LLAMA_SD_MAP) + if arch == "llama": + sd = llama_permute(sd, 32, 8) # L3 + if arch == "qwen2vl": + vsd = gguf_mmproj_loader(path) + sd.update(vsd) + else: + pass + return sd diff --git a/custom_nodes/ComfyUI-GGUF/nodes.py b/custom_nodes/ComfyUI-GGUF/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..415914234340c2eba5913bf1fdab934df72dc05f --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/nodes.py @@ -0,0 +1,305 @@ +# (c) City96 || Apache-2.0 (apache.org/licenses/LICENSE-2.0) +import torch +import logging +import collections + +import nodes +import comfy.sd +import comfy.lora +import comfy.float +import comfy.utils +import comfy.model_patcher +import comfy.model_management +import folder_paths + +from .ops import GGMLOps, move_patch_to_device +from .loader import gguf_sd_loader, gguf_clip_loader +from .dequant import is_quantized, is_torch_compatible + +def update_folder_names_and_paths(key, targets=[]): + # check for existing key + base = folder_paths.folder_names_and_paths.get(key, ([], {})) + base = base[0] if isinstance(base[0], (list, set, tuple)) else [] + # find base key & add w/ fallback, sanity check + warning + target = next((x for x in targets if x in folder_paths.folder_names_and_paths), targets[0]) + orig, _ = folder_paths.folder_names_and_paths.get(target, ([], {})) + folder_paths.folder_names_and_paths[key] = (orig or base, {".gguf"}) + if base and base != orig: + logging.warning(f"Unknown file list already present on key {key}: {base}") + +# Add a custom keys for files ending in .gguf +update_folder_names_and_paths("unet_gguf", ["diffusion_models", "unet"]) +update_folder_names_and_paths("clip_gguf", ["text_encoders", "clip"]) + +class GGUFModelPatcher(comfy.model_patcher.ModelPatcher): + patch_on_device = False + + def patch_weight_to_device(self, key, device_to=None, 
inplace_update=False): + if key not in self.patches: + return + weight = comfy.utils.get_attr(self.model, key) + + patches = self.patches[key] + if is_quantized(weight): + out_weight = weight.to(device_to) + patches = move_patch_to_device(patches, self.load_device if self.patch_on_device else self.offload_device) + # TODO: do we ever have legitimate duplicate patches? (i.e. patch on top of patched weight) + out_weight.patches = [(patches, key)] + else: + inplace_update = self.weight_inplace_update or inplace_update + if key not in self.backup: + self.backup[key] = collections.namedtuple('Dimension', ['weight', 'inplace_update'])( + weight.to(device=self.offload_device, copy=inplace_update), inplace_update + ) + + if device_to is not None: + temp_weight = comfy.model_management.cast_to_device(weight, device_to, torch.float32, copy=True) + else: + temp_weight = weight.to(torch.float32, copy=True) + + out_weight = comfy.lora.calculate_weight(patches, temp_weight, key) + out_weight = comfy.float.stochastic_rounding(out_weight, weight.dtype) + + if inplace_update: + comfy.utils.copy_to_param(self.model, key, out_weight) + else: + comfy.utils.set_attr_param(self.model, key, out_weight) + + def unpatch_model(self, device_to=None, unpatch_weights=True): + if unpatch_weights: + for p in self.model.parameters(): + if is_torch_compatible(p): + continue + patches = getattr(p, "patches", []) + if len(patches) > 0: + p.patches = [] + # TODO: Find another way to not unload after patches + return super().unpatch_model(device_to=device_to, unpatch_weights=unpatch_weights) + + mmap_released = False + def load(self, *args, force_patch_weights=False, **kwargs): + # always call `patch_weight_to_device` even for lowvram + super().load(*args, force_patch_weights=True, **kwargs) + + # make sure nothing stays linked to mmap after first load + if not self.mmap_released: + linked = [] + if kwargs.get("lowvram_model_memory", 0) > 0: + for n, m in self.model.named_modules(): + if hasattr(m, "weight"): + device = getattr(m.weight, "device", None) + if device == self.offload_device: + linked.append((n, m)) + continue + if hasattr(m, "bias"): + device = getattr(m.bias, "device", None) + if device == self.offload_device: + linked.append((n, m)) + continue + if linked and self.load_device != self.offload_device: + logging.info(f"Attempting to release mmap ({len(linked)})") + for n, m in linked: + # TODO: possible to OOM, find better way to detach + m.to(self.load_device).to(self.offload_device) + self.mmap_released = True + + def clone(self, *args, **kwargs): + src_cls = self.__class__ + self.__class__ = GGUFModelPatcher + n = super().clone(*args, **kwargs) + n.__class__ = GGUFModelPatcher + self.__class__ = src_cls + # GGUF specific clone values below + n.patch_on_device = getattr(self, "patch_on_device", False) + if src_cls != GGUFModelPatcher: + n.size = 0 # force recalc + return n + +class UnetLoaderGGUF: + @classmethod + def INPUT_TYPES(s): + unet_names = [x for x in folder_paths.get_filename_list("unet_gguf")] + return { + "required": { + "unet_name": (unet_names,), + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "load_unet" + CATEGORY = "bootleg" + TITLE = "Unet Loader (GGUF)" + + def load_unet(self, unet_name, dequant_dtype=None, patch_dtype=None, patch_on_device=None): + ops = GGMLOps() + + if dequant_dtype in ("default", None): + ops.Linear.dequant_dtype = None + elif dequant_dtype in ["target"]: + ops.Linear.dequant_dtype = dequant_dtype + else: + ops.Linear.dequant_dtype = getattr(torch, dequant_dtype) + + 
if patch_dtype in ("default", None): + ops.Linear.patch_dtype = None + elif patch_dtype in ["target"]: + ops.Linear.patch_dtype = patch_dtype + else: + ops.Linear.patch_dtype = getattr(torch, patch_dtype) + + # init model + unet_path = folder_paths.get_full_path("unet", unet_name) + sd = gguf_sd_loader(unet_path) + model = comfy.sd.load_diffusion_model_state_dict( + sd, model_options={"custom_operations": ops} + ) + if model is None: + logging.error("ERROR UNSUPPORTED UNET {}".format(unet_path)) + raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path)) + model = GGUFModelPatcher.clone(model) + model.patch_on_device = patch_on_device + return (model,) + +class UnetLoaderGGUFAdvanced(UnetLoaderGGUF): + @classmethod + def INPUT_TYPES(s): + unet_names = [x for x in folder_paths.get_filename_list("unet_gguf")] + return { + "required": { + "unet_name": (unet_names,), + "dequant_dtype": (["default", "target", "float32", "float16", "bfloat16"], {"default": "default"}), + "patch_dtype": (["default", "target", "float32", "float16", "bfloat16"], {"default": "default"}), + "patch_on_device": ("BOOLEAN", {"default": False}), + } + } + TITLE = "Unet Loader (GGUF/Advanced)" + +class CLIPLoaderGGUF: + @classmethod + def INPUT_TYPES(s): + base = nodes.CLIPLoader.INPUT_TYPES() + return { + "required": { + "clip_name": (s.get_filename_list(),), + "type": base["required"]["type"], + } + } + + RETURN_TYPES = ("CLIP",) + FUNCTION = "load_clip" + CATEGORY = "bootleg" + TITLE = "CLIPLoader (GGUF)" + + @classmethod + def get_filename_list(s): + files = [] + files += folder_paths.get_filename_list("clip") + files += folder_paths.get_filename_list("clip_gguf") + return sorted(files) + + def load_data(self, ckpt_paths): + clip_data = [] + for p in ckpt_paths: + if p.endswith(".gguf"): + sd = gguf_clip_loader(p) + else: + sd = comfy.utils.load_torch_file(p, safe_load=True) + if "scaled_fp8" in sd: # NOTE: Scaled FP8 would require different custom ops, but only one can be active + raise NotImplementedError(f"Mixing scaled FP8 with GGUF is not supported! 
Use regular CLIP loader or switch model(s)\n({p})") + clip_data.append(sd) + return clip_data + + def load_patcher(self, clip_paths, clip_type, clip_data): + clip = comfy.sd.load_text_encoder_state_dicts( + clip_type = clip_type, + state_dicts = clip_data, + model_options = { + "custom_operations": GGMLOps, + "initial_device": comfy.model_management.text_encoder_offload_device() + }, + embedding_directory = folder_paths.get_folder_paths("embeddings"), + ) + clip.patcher = GGUFModelPatcher.clone(clip.patcher) + return clip + + def load_clip(self, clip_name, type="stable_diffusion"): + clip_path = folder_paths.get_full_path("clip", clip_name) + clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) + return (self.load_patcher([clip_path], clip_type, self.load_data([clip_path])),) + +class DualCLIPLoaderGGUF(CLIPLoaderGGUF): + @classmethod + def INPUT_TYPES(s): + base = nodes.DualCLIPLoader.INPUT_TYPES() + file_options = (s.get_filename_list(), ) + return { + "required": { + "clip_name1": file_options, + "clip_name2": file_options, + "type": base["required"]["type"], + } + } + + TITLE = "DualCLIPLoader (GGUF)" + + def load_clip(self, clip_name1, clip_name2, type): + clip_path1 = folder_paths.get_full_path("clip", clip_name1) + clip_path2 = folder_paths.get_full_path("clip", clip_name2) + clip_paths = (clip_path1, clip_path2) + clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) + return (self.load_patcher(clip_paths, clip_type, self.load_data(clip_paths)),) + +class TripleCLIPLoaderGGUF(CLIPLoaderGGUF): + @classmethod + def INPUT_TYPES(s): + file_options = (s.get_filename_list(), ) + return { + "required": { + "clip_name1": file_options, + "clip_name2": file_options, + "clip_name3": file_options, + } + } + + TITLE = "TripleCLIPLoader (GGUF)" + + def load_clip(self, clip_name1, clip_name2, clip_name3, type="sd3"): + clip_path1 = folder_paths.get_full_path("clip", clip_name1) + clip_path2 = folder_paths.get_full_path("clip", clip_name2) + clip_path3 = folder_paths.get_full_path("clip", clip_name3) + clip_paths = (clip_path1, clip_path2, clip_path3) + clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) + return (self.load_patcher(clip_paths, clip_type, self.load_data(clip_paths)),) + +class QuadrupleCLIPLoaderGGUF(CLIPLoaderGGUF): + @classmethod + def INPUT_TYPES(s): + file_options = (s.get_filename_list(), ) + return { + "required": { + "clip_name1": file_options, + "clip_name2": file_options, + "clip_name3": file_options, + "clip_name4": file_options, + } + } + + TITLE = "QuadrupleCLIPLoader (GGUF)" + + def load_clip(self, clip_name1, clip_name2, clip_name3, clip_name4, type="stable_diffusion"): + clip_path1 = folder_paths.get_full_path("clip", clip_name1) + clip_path2 = folder_paths.get_full_path("clip", clip_name2) + clip_path3 = folder_paths.get_full_path("clip", clip_name3) + clip_path4 = folder_paths.get_full_path("clip", clip_name4) + clip_paths = (clip_path1, clip_path2, clip_path3, clip_path4) + clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) + return (self.load_patcher(clip_paths, clip_type, self.load_data(clip_paths)),) + +NODE_CLASS_MAPPINGS = { + "UnetLoaderGGUF": UnetLoaderGGUF, + "CLIPLoaderGGUF": CLIPLoaderGGUF, + "DualCLIPLoaderGGUF": DualCLIPLoaderGGUF, + "TripleCLIPLoaderGGUF": TripleCLIPLoaderGGUF, + "QuadrupleCLIPLoaderGGUF": QuadrupleCLIPLoaderGGUF, + "UnetLoaderGGUFAdvanced": UnetLoaderGGUFAdvanced, +} diff --git 
a/custom_nodes/ComfyUI-GGUF/ops.py b/custom_nodes/ComfyUI-GGUF/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..41d42f5549b8789dac470bc846651833b600056b --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/ops.py @@ -0,0 +1,281 @@ +# (c) City96 || Apache-2.0 (apache.org/licenses/LICENSE-2.0) +import gguf +import torch +import logging + +import comfy.ops +import comfy.lora +import comfy.model_management +from .dequant import dequantize_tensor, is_quantized + +def chained_hasattr(obj, chained_attr): + probe = obj + for attr in chained_attr.split('.'): + if hasattr(probe, attr): + probe = getattr(probe, attr) + else: + return False + return True + +# A bakcward and forward compatible way to get `torch.compiler.disable`. +def get_torch_compiler_disable_decorator(): + def dummy_decorator(*args, **kwargs): + def noop(x): + return x + return noop + + from packaging import version + + if not chained_hasattr(torch, "compiler.disable"): + logging.info("ComfyUI-GGUF: Torch too old for torch.compile - bypassing") + return dummy_decorator # torch too old + elif version.parse(torch.__version__) >= version.parse("2.8"): + logging.info("ComfyUI-GGUF: Allowing full torch compile") + return dummy_decorator # torch compile works + if chained_hasattr(torch, "_dynamo.config.nontraceable_tensor_subclasses"): + logging.info("ComfyUI-GGUF: Allowing full torch compile (nightly)") + return dummy_decorator # torch compile works, nightly before 2.8 release + else: + logging.info("ComfyUI-GGUF: Partial torch compile only, consider updating pytorch") + return torch.compiler.disable + +torch_compiler_disable = get_torch_compiler_disable_decorator() + +class GGMLTensor(torch.Tensor): + """ + Main tensor-like class for storing quantized weights + """ + def __init__(self, *args, tensor_type, tensor_shape, patches=[], **kwargs): + super().__init__() + self.tensor_type = tensor_type + self.tensor_shape = tensor_shape + self.patches = patches + + def __new__(cls, *args, tensor_type, tensor_shape, patches=[], **kwargs): + return super().__new__(cls, *args, **kwargs) + + def to(self, *args, **kwargs): + new = super().to(*args, **kwargs) + new.tensor_type = getattr(self, "tensor_type", None) + new.tensor_shape = getattr(self, "tensor_shape", new.data.shape) + new.patches = getattr(self, "patches", []).copy() + return new + + def clone(self, *args, **kwargs): + return self + + def detach(self, *args, **kwargs): + return self + + def copy_(self, *args, **kwargs): + # fixes .weight.copy_ in comfy/clip_model/CLIPTextModel + try: + return super().copy_(*args, **kwargs) + except Exception as e: + logging.warning(f"ignoring 'copy_' on tensor: {e}") + + def new_empty(self, size, *args, **kwargs): + # Intel Arc fix, ref#50 + new_tensor = super().new_empty(size, *args, **kwargs) + return GGMLTensor( + new_tensor, + tensor_type = getattr(self, "tensor_type", None), + tensor_shape = size, + patches = getattr(self, "patches", []).copy() + ) + + @property + def shape(self): + if not hasattr(self, "tensor_shape"): + self.tensor_shape = self.size() + return self.tensor_shape + +class GGMLLayer(torch.nn.Module): + """ + This (should) be responsible for de-quantizing on the fly + """ + comfy_cast_weights = True + dequant_dtype = None + patch_dtype = None + largest_layer = False + torch_compatible_tensor_types = {None, gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16} + + def is_ggml_quantized(self, *, weight=None, bias=None): + if weight is None: + weight = self.weight + if bias is None: + bias = self.bias + return 
is_quantized(weight) or is_quantized(bias) + + def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs): + weight, bias = state_dict.get(f"{prefix}weight"), state_dict.get(f"{prefix}bias") + # NOTE: using modified load for linear due to not initializing on creation, see GGMLOps todo + if self.is_ggml_quantized(weight=weight, bias=bias) or isinstance(self, torch.nn.Linear): + return self.ggml_load_from_state_dict(state_dict, prefix, *args, **kwargs) + # Not strictly required, but fixes embedding shape mismatch. Threshold set in loader.py + if isinstance(self, torch.nn.Embedding) and self.weight.shape[0] >= (64 * 1024): + return self.ggml_load_from_state_dict(state_dict, prefix, *args, **kwargs) + return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + def ggml_load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): + prefix_len = len(prefix) + for k,v in state_dict.items(): + if k[prefix_len:] == "weight": + self.weight = torch.nn.Parameter(v, requires_grad=False) + elif k[prefix_len:] == "bias" and v is not None: + self.bias = torch.nn.Parameter(v, requires_grad=False) + else: + unexpected_keys.append(k) + + # For Linear layer with missing weight + if self.weight is None and isinstance(self, torch.nn.Linear): + v = torch.zeros(self.in_features, self.out_features) + self.weight = torch.nn.Parameter(v, requires_grad=False) + missing_keys.append(prefix+"weight") + + # for vram estimation (TODO: less fragile logic?) + if getattr(self.weight, "is_largest_weight", False): + self.largest_layer = True + + def _save_to_state_dict(self, *args, **kwargs): + if self.is_ggml_quantized(): + return self.ggml_save_to_state_dict(*args, **kwargs) + return super()._save_to_state_dict(*args, **kwargs) + + def ggml_save_to_state_dict(self, destination, prefix, keep_vars): + # This is a fake state dict for vram estimation + weight = torch.zeros_like(self.weight, device=torch.device("meta")) + destination[prefix + "weight"] = weight + if self.bias is not None: + bias = torch.zeros_like(self.bias, device=torch.device("meta")) + destination[prefix + "bias"] = bias + + # Take into account space required for dequantizing the largest tensor + if self.largest_layer: + shape = getattr(self.weight, "tensor_shape", self.weight.shape) + dtype = self.dequant_dtype or torch.float16 + temp = torch.empty(*shape, device=torch.device("meta"), dtype=dtype) + destination[prefix + "temp.weight"] = temp + + return + # This would return the dequantized state dict + destination[prefix + "weight"] = self.get_weight(self.weight) + if bias is not None: + destination[prefix + "bias"] = self.get_weight(self.bias) + + def get_weight(self, tensor, dtype): + if tensor is None: + return + + # consolidate and load patches to GPU in async + patch_list = [] + device = tensor.device + for patches, key in getattr(tensor, "patches", []): + patch_list += move_patch_to_device(patches, device) + + # dequantize tensor while patches load + weight = dequantize_tensor(tensor, dtype, self.dequant_dtype) + + # prevent propagating custom tensor class + if isinstance(weight, GGMLTensor): + weight = torch.Tensor(weight) + + # apply patches + if len(patch_list) > 0: + if self.patch_dtype is None: + weight = comfy.lora.calculate_weight(patch_list, weight, key) + else: + # for testing, may degrade image quality + patch_dtype = dtype if self.patch_dtype == "target" else self.patch_dtype + weight = comfy.lora.calculate_weight(patch_list, weight, key, patch_dtype) + return 
weight + + @torch_compiler_disable() + def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None): + if input is not None: + if dtype is None: + dtype = getattr(input, "dtype", torch.float32) + if bias_dtype is None: + bias_dtype = dtype + if device is None: + device = input.device + + bias = None + non_blocking = comfy.model_management.device_supports_non_blocking(device) + if s.bias is not None: + bias = s.get_weight(s.bias.to(device), dtype) + bias = comfy.ops.cast_to(bias, bias_dtype, device, non_blocking=non_blocking, copy=False) + + weight = s.get_weight(s.weight.to(device), dtype) + weight = comfy.ops.cast_to(weight, dtype, device, non_blocking=non_blocking, copy=False) + return weight, bias + + def forward_comfy_cast_weights(self, input, *args, **kwargs): + if self.is_ggml_quantized(): + out = self.forward_ggml_cast_weights(input, *args, **kwargs) + else: + out = super().forward_comfy_cast_weights(input, *args, **kwargs) + + # non-ggml forward might still propagate custom tensor class + if isinstance(out, GGMLTensor): + out = torch.Tensor(out) + return out + + def forward_ggml_cast_weights(self, input): + raise NotImplementedError + +class GGMLOps(comfy.ops.manual_cast): + """ + Dequantize weights on the fly before doing the compute + """ + class Linear(GGMLLayer, comfy.ops.manual_cast.Linear): + def __init__(self, in_features, out_features, bias=True, device=None, dtype=None): + torch.nn.Module.__init__(self) + # TODO: better workaround for reserved memory spike on windows + # Issue is with `torch.empty` still reserving the full memory for the layer + # Windows doesn't over-commit memory so without this 24GB+ of pagefile is used + self.in_features = in_features + self.out_features = out_features + self.weight = None + self.bias = None + + def forward_ggml_cast_weights(self, input): + weight, bias = self.cast_bias_weight(input) + return torch.nn.functional.linear(input, weight, bias) + + class Conv2d(GGMLLayer, comfy.ops.manual_cast.Conv2d): + def forward_ggml_cast_weights(self, input): + weight, bias = self.cast_bias_weight(input) + return self._conv_forward(input, weight, bias) + + class Embedding(GGMLLayer, comfy.ops.manual_cast.Embedding): + def forward_ggml_cast_weights(self, input, out_dtype=None): + output_dtype = out_dtype + if self.weight.dtype == torch.float16 or self.weight.dtype == torch.bfloat16: + out_dtype = None + weight, _bias = self.cast_bias_weight(self, device=input.device, dtype=out_dtype) + return torch.nn.functional.embedding( + input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse + ).to(dtype=output_dtype) + + class LayerNorm(GGMLLayer, comfy.ops.manual_cast.LayerNorm): + def forward_ggml_cast_weights(self, input): + if self.weight is None: + return super().forward_comfy_cast_weights(input) + weight, bias = self.cast_bias_weight(input) + return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps) + + class GroupNorm(GGMLLayer, comfy.ops.manual_cast.GroupNorm): + def forward_ggml_cast_weights(self, input): + weight, bias = self.cast_bias_weight(input) + return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps) + +def move_patch_to_device(item, device): + if isinstance(item, torch.Tensor): + return item.to(device, non_blocking=True) + elif isinstance(item, tuple): + return tuple(move_patch_to_device(x, device) for x in item) + elif isinstance(item, list): + return [move_patch_to_device(x, device) for x in item] + else: + return item 
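
The `GGMLOps` layers above keep weights in their quantized GGUF form and only materialize a full-precision tensor inside the forward pass. A minimal, self-contained sketch of that lazy-dequantization pattern follows (plain PyTorch only; `fake_dequantize` is a hypothetical stand-in for `dequantize_tensor` and is not part of this repo):

```
# Minimal sketch of the lazy-dequantization pattern used by GGMLOps.Linear above.
# Illustrative only: fake_dequantize() is a hypothetical stand-in for
# dequantize_tensor(); there is no GGUF or ComfyUI dependency here.
import torch
import torch.nn.functional as F

def fake_dequantize(packed, dtype):
    # Pretend the weight is stored as int8 with a fixed per-tensor scale.
    return packed.to(dtype) * 0.01

class LazyDequantLinear(torch.nn.Module):
    def __init__(self, packed_weight, bias=None):
        super().__init__()
        # Keep only the compressed weight resident; no full-precision copy is stored.
        self.register_buffer("packed_weight", packed_weight)
        self.register_buffer("bias", bias)

    def forward(self, x):
        # Materialize the full-precision weight just for this matmul,
        # mirroring GGMLLayer.get_weight() / cast_bias_weight().
        weight = fake_dequantize(self.packed_weight, x.dtype)
        return F.linear(x, weight, self.bias)

packed = torch.randint(-127, 127, (8, 16), dtype=torch.int8)
layer = LazyDequantLinear(packed)
print(layer(torch.randn(2, 16)).shape)  # -> torch.Size([2, 8])
```

The real layers additionally fold any LoRA patches into the dequantized weight via `comfy.lora.calculate_weight` before the matmul, which is why `get_weight` gathers `tensor.patches` and moves them to the target device first.
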
diff --git a/custom_nodes/ComfyUI-GGUF/pyproject.toml b/custom_nodes/ComfyUI-GGUF/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..8abefe84ff2a096d0cb335e634a8a239241b9a01 --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/pyproject.toml @@ -0,0 +1,14 @@ +[project] +name = "ComfyUI-GGUF" +description = "GGUF Quantization support for native ComfyUI models." +version = "1.1.4" # 2.0.0 = GitHub main, 1.X.X = ComfyUI Registry +license = { file = "LICENSE" } +dependencies = ["gguf>=0.13.0", "sentencepiece", "protobuf"] + +[project.urls] +Repository = "https://github.com/city96/ComfyUI-GGUF" + +[tool.comfy] +PublisherId = "city96" +DisplayName = "ComfyUI-GGUF" +Icon = "" diff --git a/custom_nodes/ComfyUI-GGUF/requirements.txt b/custom_nodes/ComfyUI-GGUF/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f49905c79e1ea579a8e4fde0fbec966dcff2f2f8 --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/requirements.txt @@ -0,0 +1,5 @@ +# main +gguf>=0.13.0 +# optional - tokenizer +sentencepiece +protobuf diff --git a/custom_nodes/ComfyUI-GGUF/tools/README.md b/custom_nodes/ComfyUI-GGUF/tools/README.md new file mode 100644 index 0000000000000000000000000000000000000000..228bc220593d7878ca854032638bdc2682c76a71 --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/tools/README.md @@ -0,0 +1,93 @@ +## Converting initial model + +To convert your initial safetensors/ckpt model to FP16/BF16 GGUF, run the following command: + +``` +python convert.py --src E:\models\unet\flux1-dev.safetensors +``` +Make sure `gguf>=0.13.0` is installed for this step. Optionally, specify the output gguf file with the `--dst` arg. + +> [!NOTE] +> Do not use the diffusers UNET format for flux, it won't work, use the default/reference checkpoint key format. This is due to q/k/v being merged into one qkv key. +> You can convert it by loading it in ComfyUI and saving it using the built-in "ModelSave" node. + +> [!WARNING] +> For hunyuan video/wan 2.1, you will see a warning about 5D tensors. This means the script will save a **non functional** model to disk first, that you can quantize. I recommend saving these in a separate `raw` folder to avoid confusion. +> +> After quantization, you will have to run `fix_5d_tensor.py` manually to add back the missing key that was saved by the conversion code. + +## Quantizing using custom llama.cpp + +Depending on your git settings, you may need to run the following script first in order to make sure the patch file is valid. It will convert Windows (CRLF) line endings to Unix (LF) ones. + +``` +python fix_lines_ending.py +``` + +Git clone llama.cpp into the current folder: + +``` +git clone https://github.com/ggerganov/llama.cpp +``` + +Check out the correct branch, then apply the custom patch needed to add image model support to the repo you just cloned. + +``` +cd llama.cpp +git checkout tags/b3962 +git apply ..\lcpp.patch +``` + +Compile the llama-quantize binary. This example uses cmake, on linux you can just use make. + +### Visual Studio 2019, Linux, etc... + +``` +mkdir build +cmake -B build +cmake --build build --config Debug -j10 --target llama-quantize +cd .. 
+``` + +### Visual Studio 2022 + +``` +mkdir build +cmake -B build -DCMAKE_CXX_STANDARD=17 -DCMAKE_CXX_STANDARD_REQUIRED=ON -DCMAKE_CXX_FLAGS="-std=c++17" +``` + +Edit the `llama.cpp\common\log.cpp` file, inserts two lines after the existing first line: + +``` +#include "log.h" + +#define _SILENCE_CXX23_CHRONO_DEPRECATION_WARNING +#include +``` + +Then you can build the project: +``` +cmake --build build --config Debug -j10 --target llama-quantize +cd .. +``` + +### Quantize your model + + +Now you can use the newly build binary to quantize your model to the desired format: +``` +llama.cpp\build\bin\Debug\llama-quantize.exe E:\models\unet\flux1-dev-BF16.gguf E:\models\unet\flux1-dev-Q4_K_S.gguf Q4_K_S +``` + +You can extract the patch again with `git diff src\llama.cpp > lcpp.patch` if you wish to change something and contribute back. + +> [!WARNING] +> For hunyuan video/wan 2.1, you will have to run `fix_5d_tensor.py` after the quantization step is done. +> +> Example usage: `fix_5d_tensors.py --src E:\models\video\raw\wan2.1-t2v-1.3b-Q8_0.gguf --dst E:\models\video\wan2.1-t2v-1.3b-Q8_0.gguf` +> +> By default, this also saves a `fix_5d_tensors_[arch].safetensors` file in the `ComfyUI-GGUF/tools` folder, it's recommended to delete this after all models have been converted. + +> [!NOTE] +> Do not quantize SDXL / SD1 / other Conv2D heavy models. If you do, make sure to **extract the UNET model first**. +>This should be obvious, but also don't use the resulting llama-quantize binary with LLMs. diff --git a/custom_nodes/ComfyUI-GGUF/tools/convert.py b/custom_nodes/ComfyUI-GGUF/tools/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..5029c874277358559f8855d3f1032963437a3e91 --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/tools/convert.py @@ -0,0 +1,365 @@ +# (c) City96 || Apache-2.0 (apache.org/licenses/LICENSE-2.0) +import os +import gguf +import torch +import logging +import argparse +from tqdm import tqdm +from safetensors.torch import load_file, save_file + +QUANTIZATION_THRESHOLD = 1024 +REARRANGE_THRESHOLD = 512 +MAX_TENSOR_NAME_LENGTH = 127 +MAX_TENSOR_DIMS = 4 + +class ModelTemplate: + arch = "invalid" # string describing architecture + shape_fix = False # whether to reshape tensors + keys_detect = [] # list of lists to match in state dict + keys_banned = [] # list of keys that should mark model as invalid for conversion + keys_hiprec = [] # list of keys that need to be kept in fp32 for some reason + keys_ignore = [] # list of strings to ignore keys by when found + + def handle_nd_tensor(self, key, data): + raise NotImplementedError(f"Tensor detected that exceeds dims supported by C++ code! 
({key} @ {data.shape})") + +class ModelFlux(ModelTemplate): + arch = "flux" + keys_detect = [ + ("transformer_blocks.0.attn.norm_added_k.weight",), + ("double_blocks.0.img_attn.proj.weight",), + ] + keys_banned = ["transformer_blocks.0.attn.norm_added_k.weight",] + +class ModelSD3(ModelTemplate): + arch = "sd3" + keys_detect = [ + ("transformer_blocks.0.attn.add_q_proj.weight",), + ("joint_blocks.0.x_block.attn.qkv.weight",), + ] + keys_banned = ["transformer_blocks.0.attn.add_q_proj.weight",] + +class ModelAura(ModelTemplate): + arch = "aura" + keys_detect = [ + ("double_layers.3.modX.1.weight",), + ("joint_transformer_blocks.3.ff_context.out_projection.weight",), + ] + keys_banned = ["joint_transformer_blocks.3.ff_context.out_projection.weight",] + +class ModelHiDream(ModelTemplate): + arch = "hidream" + keys_detect = [ + ( + "caption_projection.0.linear.weight", + "double_stream_blocks.0.block.ff_i.shared_experts.w3.weight" + ) + ] + keys_hiprec = [ + # nn.parameter, can't load from BF16 ver + ".ff_i.gate.weight", + "img_emb.emb_pos" + ] + +class CosmosPredict2(ModelTemplate): + arch = "cosmos" + keys_detect = [ + ( + "blocks.0.mlp.layer1.weight", + "blocks.0.adaln_modulation_cross_attn.1.weight", + ) + ] + keys_hiprec = ["pos_embedder"] + keys_ignore = ["_extra_state", "accum_"] + +class ModelHyVid(ModelTemplate): + arch = "hyvid" + keys_detect = [ + ( + "double_blocks.0.img_attn_proj.weight", + "txt_in.individual_token_refiner.blocks.1.self_attn_qkv.weight", + ) + ] + + def handle_nd_tensor(self, key, data): + # hacky but don't have any better ideas + path = f"./fix_5d_tensors_{self.arch}.safetensors" # TODO: somehow get a path here?? + if os.path.isfile(path): + raise RuntimeError(f"5D tensor fix file already exists! {path}") + fsd = {key: torch.from_numpy(data)} + tqdm.write(f"5D key found in state dict! Manual fix required! 
- {key} {data.shape}") + save_file(fsd, path) + +class ModelWan(ModelHyVid): + arch = "wan" + keys_detect = [ + ( + "blocks.0.self_attn.norm_q.weight", + "text_embedding.2.weight", + "head.modulation", + ) + ] + keys_hiprec = [ + ".modulation" # nn.parameter, can't load from BF16 ver + ] + +class ModelLTXV(ModelTemplate): + arch = "ltxv" + keys_detect = [ + ( + "adaln_single.emb.timestep_embedder.linear_2.weight", + "transformer_blocks.27.scale_shift_table", + "caption_projection.linear_2.weight", + ) + ] + keys_hiprec = [ + "scale_shift_table" # nn.parameter, can't load from BF16 base quant + ] + +class ModelSDXL(ModelTemplate): + arch = "sdxl" + shape_fix = True + keys_detect = [ + ("down_blocks.0.downsamplers.0.conv.weight", "add_embedding.linear_1.weight",), + ( + "input_blocks.3.0.op.weight", "input_blocks.6.0.op.weight", + "output_blocks.2.2.conv.weight", "output_blocks.5.2.conv.weight", + ), # Non-diffusers + ("label_emb.0.0.weight",), + ] + +class ModelSD1(ModelTemplate): + arch = "sd1" + shape_fix = True + keys_detect = [ + ("down_blocks.0.downsamplers.0.conv.weight",), + ( + "input_blocks.3.0.op.weight", "input_blocks.6.0.op.weight", "input_blocks.9.0.op.weight", + "output_blocks.2.1.conv.weight", "output_blocks.5.2.conv.weight", "output_blocks.8.2.conv.weight" + ), # Non-diffusers + ] + +class ModelLumina2(ModelTemplate): + arch = "lumina2" + keys_detect = [ + ("cap_embedder.1.weight", "context_refiner.0.attention.qkv.weight") + ] + +arch_list = [ModelFlux, ModelSD3, ModelAura, ModelHiDream, CosmosPredict2, + ModelLTXV, ModelHyVid, ModelWan, ModelSDXL, ModelSD1, ModelLumina2] + +def is_model_arch(model, state_dict): + # check if model is correct + matched = False + invalid = False + for match_list in model.keys_detect: + if all(key in state_dict for key in match_list): + matched = True + invalid = any(key in state_dict for key in model.keys_banned) + break + assert not invalid, "Model architecture not allowed for conversion! (i.e. reference VS diffusers format)" + return matched + +def detect_arch(state_dict): + model_arch = None + for arch in arch_list: + if is_model_arch(arch, state_dict): + model_arch = arch() + break + assert model_arch is not None, "Unknown model architecture!" 
+ return model_arch + +def parse_args(): + parser = argparse.ArgumentParser(description="Generate F16 GGUF files from single UNET") + parser.add_argument("--src", required=True, help="Source model ckpt file.") + parser.add_argument("--dst", help="Output unet gguf file.") + args = parser.parse_args() + + if not os.path.isfile(args.src): + parser.error("No input provided!") + + return args + +def strip_prefix(state_dict): + # prefix for mixed state dict + prefix = None + for pfx in ["model.diffusion_model.", "model."]: + if any([x.startswith(pfx) for x in state_dict.keys()]): + prefix = pfx + break + + # prefix for uniform state dict + if prefix is None: + for pfx in ["net."]: + if all([x.startswith(pfx) for x in state_dict.keys()]): + prefix = pfx + break + + # strip prefix if found + if prefix is not None: + logging.info(f"State dict prefix found: '{prefix}'") + sd = {} + for k, v in state_dict.items(): + if prefix not in k: + continue + k = k.replace(prefix, "") + sd[k] = v + else: + logging.debug("State dict has no prefix") + sd = state_dict + + return sd + +def load_state_dict(path): + if any(path.endswith(x) for x in [".ckpt", ".pt", ".bin", ".pth"]): + state_dict = torch.load(path, map_location="cpu", weights_only=True) + for subkey in ["model", "module"]: + if subkey in state_dict: + state_dict = state_dict[subkey] + break + if len(state_dict) < 20: + raise RuntimeError(f"pt subkey load failed: {state_dict.keys()}") + else: + state_dict = load_file(path) + + return strip_prefix(state_dict) + +def handle_tensors(writer, state_dict, model_arch): + name_lengths = tuple(sorted( + ((key, len(key)) for key in state_dict.keys()), + key=lambda item: item[1], + reverse=True, + )) + if not name_lengths: + return + max_name_len = name_lengths[0][1] + if max_name_len > MAX_TENSOR_NAME_LENGTH: + bad_list = ", ".join(f"{key!r} ({namelen})" for key, namelen in name_lengths if namelen > MAX_TENSOR_NAME_LENGTH) + raise ValueError(f"Can only handle tensor names up to {MAX_TENSOR_NAME_LENGTH} characters. Tensors exceeding the limit: {bad_list}") + for key, data in tqdm(state_dict.items()): + old_dtype = data.dtype + + if any(x in key for x in model_arch.keys_ignore): + tqdm.write(f"Filtering ignored key: '{key}'") + continue + + if data.dtype == torch.bfloat16: + data = data.to(torch.float32).numpy() + # this is so we don't break torch 2.0.X + elif data.dtype in [getattr(torch, "float8_e4m3fn", "_invalid"), getattr(torch, "float8_e5m2", "_invalid")]: + data = data.to(torch.float16).numpy() + else: + data = data.numpy() + + n_dims = len(data.shape) + data_shape = data.shape + if old_dtype == torch.bfloat16: + data_qtype = gguf.GGMLQuantizationType.BF16 + # elif old_dtype == torch.float32: + # data_qtype = gguf.GGMLQuantizationType.F32 + else: + data_qtype = gguf.GGMLQuantizationType.F16 + + # The max no. 
of dimensions that can be handled by the quantization code is 4 + if len(data.shape) > MAX_TENSOR_DIMS: + model_arch.handle_nd_tensor(key, data) + continue # needs to be added back later + + # get number of parameters (AKA elements) in this tensor + n_params = 1 + for dim_size in data_shape: + n_params *= dim_size + + if old_dtype in (torch.float32, torch.bfloat16): + if n_dims == 1: + # one-dimensional tensors should be kept in F32 + # also speeds up inference due to not dequantizing + data_qtype = gguf.GGMLQuantizationType.F32 + + elif n_params <= QUANTIZATION_THRESHOLD: + # very small tensors + data_qtype = gguf.GGMLQuantizationType.F32 + + elif any(x in key for x in model_arch.keys_hiprec): + # tensors that require max precision + data_qtype = gguf.GGMLQuantizationType.F32 + + if (model_arch.shape_fix # NEVER reshape for models such as flux + and n_dims > 1 # Skip one-dimensional tensors + and n_params >= REARRANGE_THRESHOLD # Only rearrange tensors meeting the size requirement + and (n_params / 256).is_integer() # Rearranging only makes sense if total elements is divisible by 256 + and not (data.shape[-1] / 256).is_integer() # Only need to rearrange if the last dimension is not divisible by 256 + ): + orig_shape = data.shape + data = data.reshape(n_params // 256, 256) + writer.add_array(f"comfy.gguf.orig_shape.{key}", tuple(int(dim) for dim in orig_shape)) + + try: + data = gguf.quants.quantize(data, data_qtype) + except (AttributeError, gguf.QuantError) as e: + tqdm.write(f"falling back to F16: {e}") + data_qtype = gguf.GGMLQuantizationType.F16 + data = gguf.quants.quantize(data, data_qtype) + + new_name = key # do we need to rename? + + shape_str = f"{{{', '.join(str(n) for n in reversed(data.shape))}}}" + tqdm.write(f"{f'%-{max_name_len + 4}s' % f'{new_name}'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}") + + writer.add_tensor(new_name, data, raw_dtype=data_qtype) + +def convert_file(path, dst_path=None, interact=True, overwrite=False): + # load & run model detection logic + state_dict = load_state_dict(path) + model_arch = detect_arch(state_dict) + logging.info(f"* Architecture detected from input: {model_arch.arch}") + + # detect & set dtype for output file + dtypes = [x.dtype for x in state_dict.values()] + dtypes = {x:dtypes.count(x) for x in set(dtypes)} + main_dtype = max(dtypes, key=dtypes.get) + + if main_dtype == torch.bfloat16: + ftype_name = "BF16" + ftype_gguf = gguf.LlamaFileType.MOSTLY_BF16 + # elif main_dtype == torch.float32: + # ftype_name = "F32" + # ftype_gguf = None + else: + ftype_name = "F16" + ftype_gguf = gguf.LlamaFileType.MOSTLY_F16 + + if dst_path is None: + dst_path = f"{os.path.splitext(path)[0]}-{ftype_name}.gguf" + elif "{ftype}" in dst_path: # lcpp logic + dst_path = dst_path.replace("{ftype}", ftype_name) + + if os.path.isfile(dst_path) and not overwrite: + if interact: + input("Output exists enter to continue or ctrl+c to abort!") + else: + raise OSError("Output exists and overwriting is disabled!") + + # handle actual file + writer = gguf.GGUFWriter(path=None, arch=model_arch.arch) + writer.add_quantization_version(gguf.GGML_QUANT_VERSION) + if ftype_gguf is not None: + writer.add_file_type(ftype_gguf) + + handle_tensors(writer, state_dict, model_arch) + writer.write_header_to_file(path=dst_path) + writer.write_kv_data_to_file() + writer.write_tensors_to_file(progress=True) + writer.close() + + fix = f"./fix_5d_tensors_{model_arch.arch}.safetensors" + if os.path.isfile(fix): + logging.warning(f"\n### Warning! 
Fix file found at '{fix}'") + logging.warning(" you most likely need to run 'fix_5d_tensors.py' after quantization.") + + return dst_path, model_arch + +if __name__ == "__main__": + args = parse_args() + convert_file(args.src, args.dst) + diff --git a/custom_nodes/ComfyUI-GGUF/tools/fix_5d_tensors.py b/custom_nodes/ComfyUI-GGUF/tools/fix_5d_tensors.py new file mode 100644 index 0000000000000000000000000000000000000000..0e61d1c2e5f2572c3d9fa12eba38c84f13689a53 --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/tools/fix_5d_tensors.py @@ -0,0 +1,82 @@ +# (c) City96 || Apache-2.0 (apache.org/licenses/LICENSE-2.0) +import os +import gguf +import torch +import argparse +from tqdm import tqdm +from safetensors.torch import load_file + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--src", required=True) + parser.add_argument("--dst", required=True) + parser.add_argument("--fix", required=False, help="Defaults to ./fix_5d_tensors_[arch].pt") + parser.add_argument("--overwrite", action="store_true") + args = parser.parse_args() + + if not os.path.isfile(args.src): + parser.error(f"Invalid source file '{args.src}'") + if not args.overwrite and os.path.exists(args.dst): + parser.error(f"Output exists, use '--overwrite' ({args.dst})") + + return args + +def get_arch_str(reader): + field = reader.get_field("general.architecture") + return str(field.parts[field.data[-1]], encoding="utf-8") + +def get_file_type(reader): + field = reader.get_field("general.file_type") + ft = int(field.parts[field.data[-1]]) + return gguf.LlamaFileType(ft) + +if __name__ == "__main__": + args = get_args() + + # read existing + reader = gguf.GGUFReader(args.src) + arch = get_arch_str(reader) + file_type = get_file_type(reader) + print(f"Detected arch: '{arch}' (ftype: {str(file_type)})") + + # prep fix + if args.fix is None: + args.fix = f"./fix_5d_tensors_{arch}.safetensors" + + if not os.path.isfile(args.fix): + raise OSError(f"No 5D tensor fix file: {args.fix}") + + sd5d = load_file(args.fix) + sd5d = {k:v.numpy() for k,v in sd5d.items()} + print("5D tensors:", sd5d.keys()) + + # prep output + writer = gguf.GGUFWriter(path=None, arch=arch) + writer.add_quantization_version(gguf.GGML_QUANT_VERSION) + writer.add_file_type(file_type) + + added = [] + def add_extra_key(writer, key, data): + global added + data_qtype = gguf.GGMLQuantizationType.F32 + data = gguf.quants.quantize(data, data_qtype) + tqdm.write(f"Adding key {key} ({data.shape})") + writer.add_tensor(key, data, raw_dtype=data_qtype) + added.append(key) + + # main loop to add missing 5D tensor(s) + for tensor in tqdm(reader.tensors): + writer.add_tensor(tensor.name, tensor.data, raw_dtype=tensor.tensor_type) + key5d = tensor.name.replace(".bias", ".weight") + if key5d in sd5d.keys(): + add_extra_key(writer, key5d, sd5d[key5d]) + + # brute force for any missed + for key, data in sd5d.items(): + if key not in added: + add_extra_key(writer, key, data) + + writer.write_header_to_file(path=args.dst) + writer.write_kv_data_to_file() + writer.write_tensors_to_file(progress=True) + writer.close() diff --git a/custom_nodes/ComfyUI-GGUF/tools/fix_lines_ending.py b/custom_nodes/ComfyUI-GGUF/tools/fix_lines_ending.py new file mode 100644 index 0000000000000000000000000000000000000000..346e3501fb7682fa3754f175965aa750241fe4ac --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/tools/fix_lines_ending.py @@ -0,0 +1,31 @@ +import os + +files = ["lcpp.patch", "lcpp_sd3.patch"] + +def has_unix_line_endings(file_path): + try: + with open(file_path, 'rb') as file: 
+ content = file.read() + return b'\r\n' not in content + except Exception as e: + print(f"Error checking '{file_path}': {e}") + return False + +def convert_to_linux_format(file_path): + try: + with open(file_path, 'rb') as file: + content = file.read().replace(b'\r\n', b'\n') + with open(file_path, 'wb') as file: + file.write(content) + print(f"'{file_path}' converted to Linux line endings (LF).") + except Exception as e: + print(f"Error processing '{file_path}': {e}") + +for file in files: + if os.path.exists(file): + if has_unix_line_endings(file): + print(f"'{file}' already has Unix line endings (LF). No conversion needed.") + else: + convert_to_linux_format(file) + else: + print(f"File '{file}' does not exist.") diff --git a/custom_nodes/ComfyUI-GGUF/tools/lcpp.patch b/custom_nodes/ComfyUI-GGUF/tools/lcpp.patch new file mode 100644 index 0000000000000000000000000000000000000000..92396e17b4ecf281265f07603232d23111ee9baa --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/tools/lcpp.patch @@ -0,0 +1,451 @@ +diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h +index de3c706f..0267c1fa 100644 +--- a/ggml/include/ggml.h ++++ b/ggml/include/ggml.h +@@ -223,7 +223,7 @@ + #define GGML_MAX_OP_PARAMS 64 + + #ifndef GGML_MAX_NAME +-# define GGML_MAX_NAME 64 ++# define GGML_MAX_NAME 128 + #endif + + #define GGML_DEFAULT_N_THREADS 4 +@@ -2449,6 +2449,7 @@ extern "C" { + + // manage tensor info + GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor); ++ GGML_API void gguf_set_tensor_ndim(struct gguf_context * ctx, const char * name, int n_dim); + GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type); + GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size); + +diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c +index b16c462f..6d1568f1 100644 +--- a/ggml/src/ggml.c ++++ b/ggml/src/ggml.c +@@ -22960,6 +22960,14 @@ void gguf_add_tensor( + ctx->header.n_tensors++; + } + ++void gguf_set_tensor_ndim(struct gguf_context * ctx, const char * name, const int n_dim) { ++ const int idx = gguf_find_tensor(ctx, name); ++ if (idx < 0) { ++ GGML_ABORT("tensor not found"); ++ } ++ ctx->infos[idx].n_dims = n_dim; ++} ++ + void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) { + const int idx = gguf_find_tensor(ctx, name); + if (idx < 0) { +diff --git a/src/llama.cpp b/src/llama.cpp +index 24e1f1f0..25db4c69 100644 +--- a/src/llama.cpp ++++ b/src/llama.cpp +@@ -205,6 +205,17 @@ enum llm_arch { + LLM_ARCH_GRANITE, + LLM_ARCH_GRANITE_MOE, + LLM_ARCH_CHAMELEON, ++ LLM_ARCH_FLUX, ++ LLM_ARCH_SD1, ++ LLM_ARCH_SDXL, ++ LLM_ARCH_SD3, ++ LLM_ARCH_AURA, ++ LLM_ARCH_LTXV, ++ LLM_ARCH_HYVID, ++ LLM_ARCH_WAN, ++ LLM_ARCH_HIDREAM, ++ LLM_ARCH_COSMOS, ++ LLM_ARCH_LUMINA2, + LLM_ARCH_UNKNOWN, + }; + +@@ -258,6 +269,17 @@ static const std::map LLM_ARCH_NAMES = { + { LLM_ARCH_GRANITE, "granite" }, + { LLM_ARCH_GRANITE_MOE, "granitemoe" }, + { LLM_ARCH_CHAMELEON, "chameleon" }, ++ { LLM_ARCH_FLUX, "flux" }, ++ { LLM_ARCH_SD1, "sd1" }, ++ { LLM_ARCH_SDXL, "sdxl" }, ++ { LLM_ARCH_SD3, "sd3" }, ++ { LLM_ARCH_AURA, "aura" }, ++ { LLM_ARCH_LTXV, "ltxv" }, ++ { LLM_ARCH_HYVID, "hyvid" }, ++ { LLM_ARCH_WAN, "wan" }, ++ { LLM_ARCH_HIDREAM, "hidream" }, ++ { LLM_ARCH_COSMOS, "cosmos" }, ++ { LLM_ARCH_LUMINA2, "lumina2" }, + { LLM_ARCH_UNKNOWN, "(unknown)" }, + }; + +@@ -1531,6 +1553,17 @@ static const std::map> LLM_TENSOR_N + { LLM_TENSOR_ATTN_K_NORM, 
"blk.%d.attn_k_norm" }, + }, + }, ++ { LLM_ARCH_FLUX, {}}, ++ { LLM_ARCH_SD1, {}}, ++ { LLM_ARCH_SDXL, {}}, ++ { LLM_ARCH_SD3, {}}, ++ { LLM_ARCH_AURA, {}}, ++ { LLM_ARCH_LTXV, {}}, ++ { LLM_ARCH_HYVID, {}}, ++ { LLM_ARCH_WAN, {}}, ++ { LLM_ARCH_HIDREAM, {}}, ++ { LLM_ARCH_COSMOS, {}}, ++ { LLM_ARCH_LUMINA2, {}}, + { + LLM_ARCH_UNKNOWN, + { +@@ -5403,6 +5436,25 @@ static void llm_load_hparams( + // get general kv + ml.get_key(LLM_KV_GENERAL_NAME, model.name, false); + ++ // Disable LLM metadata for image models ++ switch (model.arch) { ++ case LLM_ARCH_FLUX: ++ case LLM_ARCH_SD1: ++ case LLM_ARCH_SDXL: ++ case LLM_ARCH_SD3: ++ case LLM_ARCH_AURA: ++ case LLM_ARCH_LTXV: ++ case LLM_ARCH_HYVID: ++ case LLM_ARCH_WAN: ++ case LLM_ARCH_HIDREAM: ++ case LLM_ARCH_COSMOS: ++ case LLM_ARCH_LUMINA2: ++ model.ftype = ml.ftype; ++ return; ++ default: ++ break; ++ } ++ + // get hparams kv + ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab); + +@@ -18016,6 +18068,134 @@ static void llama_tensor_dequantize_internal( + workers.clear(); + } + ++static ggml_type img_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) { ++ // Special function for quantizing image model tensors ++ const std::string name = ggml_get_name(tensor); ++ const llm_arch arch = qs.model.arch; ++ ++ // Sanity check ++ if ( ++ (name.find("model.diffusion_model.") != std::string::npos) || ++ (name.find("first_stage_model.") != std::string::npos) || ++ (name.find("single_transformer_blocks.") != std::string::npos) || ++ (name.find("joint_transformer_blocks.") != std::string::npos) ++ ) { ++ throw std::runtime_error("Invalid input GGUF file. This is not a supported UNET model"); ++ } ++ ++ // Unsupported quant types - exclude all IQ quants for now ++ if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ++ ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ++ ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ++ ftype == LLAMA_FTYPE_MOSTLY_IQ1_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ++ ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S || ++ ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_Q4_0_4_4 || ++ ftype == LLAMA_FTYPE_MOSTLY_Q4_0_4_8 || ftype == LLAMA_FTYPE_MOSTLY_Q4_0_8_8) { ++ throw std::runtime_error("Invalid quantization type for image model (Not supported)"); ++ } ++ ++ if ( // Rules for to_v attention ++ (name.find("attn_v.weight") != std::string::npos) || ++ (name.find(".to_v.weight") != std::string::npos) || ++ (name.find(".v.weight") != std::string::npos) || ++ (name.find(".attn.w1v.weight") != std::string::npos) || ++ (name.find(".attn.w2v.weight") != std::string::npos) || ++ (name.find("_attn.v_proj.weight") != std::string::npos) ++ ){ ++ if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) { ++ new_type = GGML_TYPE_Q3_K; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) { ++ new_type = qs.i_attention_wv < 2 ? 
GGML_TYPE_Q5_K : GGML_TYPE_Q4_K; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) { ++ new_type = GGML_TYPE_Q5_K; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) { ++ new_type = GGML_TYPE_Q6_K; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) { ++ new_type = GGML_TYPE_Q5_K; ++ } ++ ++qs.i_attention_wv; ++ } else if ( // Rules for fused qkv attention ++ (name.find("attn_qkv.weight") != std::string::npos) || ++ (name.find("attn.qkv.weight") != std::string::npos) || ++ (name.find("attention.qkv.weight") != std::string::npos) ++ ) { ++ if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) { ++ new_type = GGML_TYPE_Q4_K; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) { ++ new_type = GGML_TYPE_Q5_K; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) { ++ new_type = GGML_TYPE_Q6_K; ++ } ++ } else if ( // Rules for ffn ++ (name.find("ffn_down") != std::string::npos) || ++ ((name.find("experts.") != std::string::npos) && (name.find(".w2.weight") != std::string::npos)) || ++ (name.find(".ffn.2.weight") != std::string::npos) || // is this even the right way around? ++ (name.find(".ff.net.2.weight") != std::string::npos) || ++ (name.find(".mlp.layer2.weight") != std::string::npos) || ++ (name.find(".adaln_modulation_mlp.2.weight") != std::string::npos) || ++ (name.find(".feed_forward.w2.weight") != std::string::npos) ++ ) { ++ // TODO: add back `layer_info` with some model specific logic + logic further down ++ if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) { ++ new_type = GGML_TYPE_Q4_K; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) { ++ new_type = GGML_TYPE_Q5_K; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S) { ++ new_type = GGML_TYPE_Q5_K; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) { ++ new_type = GGML_TYPE_Q6_K; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) { ++ new_type = GGML_TYPE_Q6_K; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_0) { ++ new_type = GGML_TYPE_Q4_1; ++ } ++ else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_0) { ++ new_type = GGML_TYPE_Q5_1; ++ } ++ ++qs.i_ffn_down; ++ } ++ ++ // Sanity check for row shape ++ bool convert_incompatible_tensor = false; ++ if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K || ++ new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) { ++ int nx = tensor->ne[0]; ++ int ny = tensor->ne[1]; ++ if (nx % QK_K != 0) { ++ LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type)); ++ convert_incompatible_tensor = true; ++ } else { ++ ++qs.n_k_quantized; ++ } ++ } ++ if (convert_incompatible_tensor) { ++ // TODO: Possibly reenable this in the future ++ // switch (new_type) { ++ // case GGML_TYPE_Q2_K: ++ // case GGML_TYPE_Q3_K: ++ // case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break; ++ // case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break; ++ // case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break; ++ // default: throw std::runtime_error("\nUnsupported tensor size encountered\n"); ++ // } ++ new_type = GGML_TYPE_F16; ++ LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type)); ++ ++qs.n_fallback; ++ } ++ return new_type; ++} ++ + static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) { + const std::string name = ggml_get_name(tensor); + +@@ -18513,7 +18693,9 @@ static void 
llama_model_quantize_internal(const std::string & fname_inp, const s + if (llama_model_has_encoder(&model)) { + n_attn_layer *= 3; + } +- GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected"); ++ if (model.arch != LLM_ARCH_HYVID) { // TODO: Check why this fails ++ GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected"); ++ } + } + + size_t total_size_org = 0; +@@ -18547,6 +18729,51 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s + ctx_outs[i_split] = gguf_init_empty(); + } + gguf_add_tensor(ctx_outs[i_split], tensor); ++ // SD3 pos_embed needs special fix as first dim is 1, which gets truncated here ++ if (model.arch == LLM_ARCH_SD3) { ++ const std::string name = ggml_get_name(tensor); ++ if (name == "pos_embed" && tensor->ne[2] == 1) { ++ const int n_dim = 3; ++ gguf_set_tensor_ndim(ctx_outs[i_split], "pos_embed", n_dim); ++ LLAMA_LOG_INFO("\n%s: Correcting pos_embed shape for SD3: [key:%s]\n", __func__, tensor->name); ++ } ++ } ++ // same goes for auraflow ++ if (model.arch == LLM_ARCH_AURA) { ++ const std::string name = ggml_get_name(tensor); ++ if (name == "positional_encoding" && tensor->ne[2] == 1) { ++ const int n_dim = 3; ++ gguf_set_tensor_ndim(ctx_outs[i_split], "positional_encoding", n_dim); ++ LLAMA_LOG_INFO("\n%s: Correcting positional_encoding shape for AuraFlow: [key:%s]\n", __func__, tensor->name); ++ } ++ if (name == "register_tokens" && tensor->ne[2] == 1) { ++ const int n_dim = 3; ++ gguf_set_tensor_ndim(ctx_outs[i_split], "register_tokens", n_dim); ++ LLAMA_LOG_INFO("\n%s: Correcting register_tokens shape for AuraFlow: [key:%s]\n", __func__, tensor->name); ++ } ++ } ++ // conv3d fails due to max dims - unsure what to do here as we never even reach this check ++ if (model.arch == LLM_ARCH_HYVID) { ++ const std::string name = ggml_get_name(tensor); ++ if (name == "img_in.proj.weight" && tensor->ne[5] != 1 ) { ++ throw std::runtime_error("img_in.proj.weight size failed for HyVid"); ++ } ++ } ++ // All the modulation layers also have dim1, and I think conv3d fails here too but we segfaul way before that... 
++ if (model.arch == LLM_ARCH_WAN) { ++ const std::string name = ggml_get_name(tensor); ++ if (name.find(".modulation") != std::string::npos && tensor->ne[2] == 1) { ++ const int n_dim = 3; ++ gguf_set_tensor_ndim(ctx_outs[i_split], tensor->name, n_dim); ++ LLAMA_LOG_INFO("\n%s: Correcting shape for Wan: [key:%s]\n", __func__, tensor->name); ++ } ++ // FLF2V model only ++ if (name == "img_emb.emb_pos") { ++ const int n_dim = 3; ++ gguf_set_tensor_ndim(ctx_outs[i_split], tensor->name, n_dim); ++ LLAMA_LOG_INFO("\n%s: Correcting shape for Wan FLF2V: [key:%s]\n", __func__, tensor->name); ++ } ++ } + } + + // Set split info if needed +@@ -18647,6 +18874,110 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s + // do not quantize relative position bias (T5) + quantize &= name.find("attn_rel_b.weight") == std::string::npos; + ++ // rules for image models ++ bool image_model = false; ++ if (model.arch == LLM_ARCH_FLUX) { ++ image_model = true; ++ quantize &= name.find("txt_in.") == std::string::npos; ++ quantize &= name.find("img_in.") == std::string::npos; ++ quantize &= name.find("time_in.") == std::string::npos; ++ quantize &= name.find("vector_in.") == std::string::npos; ++ quantize &= name.find("guidance_in.") == std::string::npos; ++ quantize &= name.find("final_layer.") == std::string::npos; ++ } ++ if (model.arch == LLM_ARCH_SD1 || model.arch == LLM_ARCH_SDXL) { ++ image_model = true; ++ quantize &= name.find("class_embedding.") == std::string::npos; ++ quantize &= name.find("time_embedding.") == std::string::npos; ++ quantize &= name.find("add_embedding.") == std::string::npos; ++ quantize &= name.find("time_embed.") == std::string::npos; ++ quantize &= name.find("label_emb.") == std::string::npos; ++ quantize &= name.find("conv_in.") == std::string::npos; ++ quantize &= name.find("conv_out.") == std::string::npos; ++ quantize &= name != "input_blocks.0.0.weight"; ++ quantize &= name != "out.2.weight"; ++ } ++ if (model.arch == LLM_ARCH_SD3) { ++ image_model = true; ++ quantize &= name.find("final_layer.") == std::string::npos; ++ quantize &= name.find("time_text_embed.") == std::string::npos; ++ quantize &= name.find("context_embedder.") == std::string::npos; ++ quantize &= name.find("t_embedder.") == std::string::npos; ++ quantize &= name.find("y_embedder.") == std::string::npos; ++ quantize &= name.find("x_embedder.") == std::string::npos; ++ quantize &= name != "proj_out.weight"; ++ quantize &= name != "pos_embed"; ++ } ++ if (model.arch == LLM_ARCH_AURA) { ++ image_model = true; ++ quantize &= name.find("t_embedder.") == std::string::npos; ++ quantize &= name.find("init_x_linear.") == std::string::npos; ++ quantize &= name != "modF.1.weight"; ++ quantize &= name != "cond_seq_linear.weight"; ++ quantize &= name != "final_linear.weight"; ++ quantize &= name != "final_linear.weight"; ++ quantize &= name != "positional_encoding"; ++ quantize &= name != "register_tokens"; ++ } ++ if (model.arch == LLM_ARCH_LTXV) { ++ image_model = true; ++ quantize &= name.find("adaln_single.") == std::string::npos; ++ quantize &= name.find("caption_projection.") == std::string::npos; ++ quantize &= name.find("patchify_proj.") == std::string::npos; ++ quantize &= name.find("proj_out.") == std::string::npos; ++ quantize &= name.find("scale_shift_table") == std::string::npos; // last block too ++ } ++ if (model.arch == LLM_ARCH_HYVID) { ++ image_model = true; ++ quantize &= name.find("txt_in.") == std::string::npos; ++ quantize &= name.find("img_in.") == std::string::npos; 
++ quantize &= name.find("time_in.") == std::string::npos; ++ quantize &= name.find("vector_in.") == std::string::npos; ++ quantize &= name.find("guidance_in.") == std::string::npos; ++ quantize &= name.find("final_layer.") == std::string::npos; ++ } ++ if (model.arch == LLM_ARCH_WAN) { ++ image_model = true; ++ quantize &= name.find("modulation.") == std::string::npos; ++ quantize &= name.find("patch_embedding.") == std::string::npos; ++ quantize &= name.find("text_embedding.") == std::string::npos; ++ quantize &= name.find("time_projection.") == std::string::npos; ++ quantize &= name.find("time_embedding.") == std::string::npos; ++ quantize &= name.find("img_emb.") == std::string::npos; ++ quantize &= name.find("head.") == std::string::npos; ++ } ++ if (model.arch == LLM_ARCH_HIDREAM) { ++ image_model = true; ++ quantize &= name.find("p_embedder.") == std::string::npos; ++ quantize &= name.find("t_embedder.") == std::string::npos; ++ quantize &= name.find("x_embedder.") == std::string::npos; ++ quantize &= name.find("final_layer.") == std::string::npos; ++ quantize &= name.find(".ff_i.gate.weight") == std::string::npos; ++ quantize &= name.find("caption_projection.") == std::string::npos; ++ } ++ if (model.arch == LLM_ARCH_COSMOS) { ++ image_model = true; ++ quantize &= name.find("p_embedder.") == std::string::npos; ++ quantize &= name.find("t_embedder.") == std::string::npos; ++ quantize &= name.find("t_embedding_norm.") == std::string::npos; ++ quantize &= name.find("x_embedder.") == std::string::npos; ++ quantize &= name.find("pos_embedder.") == std::string::npos; ++ quantize &= name.find("final_layer.") == std::string::npos; ++ } ++ if (model.arch == LLM_ARCH_LUMINA2) { ++ image_model = true; ++ quantize &= name.find("t_embedder.") == std::string::npos; ++ quantize &= name.find("x_embedder.") == std::string::npos; ++ quantize &= name.find("final_layer.") == std::string::npos; ++ quantize &= name.find("cap_embedder.") == std::string::npos; ++ quantize &= name.find("context_refiner.") == std::string::npos; ++ quantize &= name.find("noise_refiner.") == std::string::npos; ++ } ++ // ignore 3D/4D tensors for image models as the code was never meant to handle these ++ if (image_model) { ++ quantize &= ggml_n_dims(tensor) == 2; ++ } ++ + enum ggml_type new_type; + void * new_data; + size_t new_size; +@@ -18655,6 +18986,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s + new_type = default_type; + + // get more optimal quantization type based on the tensor shape, layer, etc. ++ if (image_model) { ++ new_type = img_tensor_get_type(qs, new_type, tensor, ftype); ++ } else { + if (!params->pure && ggml_is_quantized(default_type)) { + new_type = llama_tensor_get_type(qs, new_type, tensor, ftype); + } +@@ -18664,6 +18998,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s + if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) { + new_type = params->output_tensor_type; + } ++ } + + // If we've decided to quantize to the same type the tensor is already + // in then there's nothing to do. 
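
The `img_tensor_get_type` rules in the patch promote attention and FFN tensors to higher-bit K-quants, but any K-quant still requires the tensor's innermost (row) dimension to be a multiple of QK_K; incompatible tensors fall back to F16. The same 256-element constraint is what the `shape_fix` rearrange in `convert.py` works around for SD1/SDXL Conv2D-heavy models. A small illustrative check (assuming QK_K = 256, the K-quant super-block size in the llama.cpp tag targeted above):

```
# Illustrative check for the K-quant row-shape constraint enforced in the patch.
# Assumes QK_K = 256, the K-quant super-block size in mainline llama.cpp.
QK_K = 256

def kquant_compatible(shape):
    """K-quants need the innermost (row) dimension to be a multiple of QK_K,
    otherwise the patch falls back to F16 for that tensor."""
    return len(shape) >= 2 and shape[-1] % QK_K == 0

def shape_fix_applies(shape):
    """convert.py's shape_fix path: reshape to (n_params // 256, 256) when the
    total element count is divisible by 256 but the last dimension is not."""
    n_params = 1
    for dim in shape:
        n_params *= dim
    return len(shape) > 1 and n_params % 256 == 0 and shape[-1] % 256 != 0

for shape in [(3072, 3072), (320, 320, 3, 3), (77, 768)]:
    print(shape, "| K-quant ok:", kquant_compatible(shape), "| rearrange:", shape_fix_applies(shape))
```

Tensors rearranged this way have their original shape recorded under the `comfy.gguf.orig_shape.<key>` metadata written by `convert.py`, so the original layout can be restored when the model is loaded.
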
diff --git a/custom_nodes/ComfyUI-GGUF/tools/read_tensors.py b/custom_nodes/ComfyUI-GGUF/tools/read_tensors.py new file mode 100644 index 0000000000000000000000000000000000000000..1bdff028a787c09b38e5616ef75a2f070c672445 --- /dev/null +++ b/custom_nodes/ComfyUI-GGUF/tools/read_tensors.py @@ -0,0 +1,21 @@ +#!/usr/bin/python3 +import os +import sys +import gguf + +def read_tensors(path): + reader = gguf.GGUFReader(path) + for tensor in reader.tensors: + if tensor.tensor_type == gguf.GGMLQuantizationType.F32: + continue + print(f"{str(tensor.tensor_type):32}: {tensor.name}") + +try: + path = sys.argv[1] + assert os.path.isfile(path), "Invalid path" + print(f"input: {path}") +except Exception as e: + input(f"failed: {e}") +else: + read_tensors(path) + input() diff --git a/custom_nodes/comfyui-kjnodes/.DS_Store b/custom_nodes/comfyui-kjnodes/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/.DS_Store differ diff --git a/custom_nodes/comfyui-kjnodes/.github/FUNDING.yml b/custom_nodes/comfyui-kjnodes/.github/FUNDING.yml new file mode 100644 index 0000000000000000000000000000000000000000..f475f425424d9172d049b4c016ebe8817987149e --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/.github/FUNDING.yml @@ -0,0 +1,2 @@ +github: [kijai] +custom: ["https://www.paypal.me/kijaidesign"] diff --git a/custom_nodes/comfyui-kjnodes/.github/workflows/publish.yml b/custom_nodes/comfyui-kjnodes/.github/workflows/publish.yml new file mode 100644 index 0000000000000000000000000000000000000000..e155f5f40e46fa83942dee5a9460e8093f3b4208 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/.github/workflows/publish.yml @@ -0,0 +1,25 @@ +name: Publish to Comfy registry +on: + workflow_dispatch: + push: + branches: + - main + paths: + - "pyproject.toml" + +permissions: + issues: write + +jobs: + publish-node: + name: Publish Custom Node to registry + runs-on: ubuntu-latest + if: ${{ github.repository_owner == 'kijai' }} + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Publish Custom Node + uses: Comfy-Org/publish-node-action@v1 + with: + ## Add your own personal access token to your Github Repository secrets and reference it here. 
+ personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} diff --git a/custom_nodes/comfyui-kjnodes/.gitignore b/custom_nodes/comfyui-kjnodes/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d462a62fdd67858a783934170db4091ea95eb18f --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/.gitignore @@ -0,0 +1,11 @@ +__pycache__ +/venv +*.code-workspace +.history +.vscode +*.ckpt +*.pth +types +models +jsconfig.json +custom_dimensions.json diff --git a/custom_nodes/comfyui-kjnodes/.tracking b/custom_nodes/comfyui-kjnodes/.tracking new file mode 100644 index 0000000000000000000000000000000000000000..b99092643f12e91e7a785bfb3a24e47440cd6ac9 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/.tracking @@ -0,0 +1,49 @@ +.github/FUNDING.yml +.github/workflows/publish.yml +.gitignore +LICENSE +README.md +__init__.py +custom_dimensions_example.json +docs/images/2024-04-03_20_49_29-ComfyUI.png +docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png +docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png +example_workflows/leapfusion_hunyuuanvideo_i2v_native_testing.json +fonts/FreeMono.ttf +fonts/FreeMonoBoldOblique.otf +fonts/TTNorms-Black.otf +intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors +intrinsic_loras/intrinsic_lora_sd15_depth.safetensors +intrinsic_loras/intrinsic_lora_sd15_normal.safetensors +intrinsic_loras/intrinsic_lora_sd15_shading.safetensors +intrinsic_loras/intrinsic_loras.txt +kjweb_async/marked.min.js +kjweb_async/protovis.min.js +kjweb_async/purify.min.js +kjweb_async/svg-path-properties.min.js +nodes/audioscheduler_nodes.py +nodes/batchcrop_nodes.py +nodes/curve_nodes.py +nodes/image_nodes.py +nodes/intrinsic_lora_nodes.py +nodes/lora_nodes.py +nodes/mask_nodes.py +nodes/model_optimization_nodes.py +nodes/nodes.py +pyproject.toml +requirements.txt +utility/fluid.py +utility/magictex.py +utility/numerical.py +utility/utility.py +web/green.png +web/js/appearance.js +web/js/browserstatus.js +web/js/contextmenu.js +web/js/fast_preview.js +web/js/help_popup.js +web/js/jsnodes.js +web/js/point_editor.js +web/js/setgetnodes.js +web/js/spline_editor.js +web/red.png \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/LICENSE b/custom_nodes/comfyui-kjnodes/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. diff --git a/custom_nodes/comfyui-kjnodes/README.md b/custom_nodes/comfyui-kjnodes/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6371f5014823bc66ffd1f378fea8715a7dd590ff --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/README.md @@ -0,0 +1,65 @@ +# KJNodes for ComfyUI + +Various quality-of-life and masking-related nodes and scripts for ComfyUI, made by combining the functionality of existing nodes. + +I know I'm bad at documentation, especially for this project, which has grown from random practice nodes to... too many lines in one file. +I have, however, started to add descriptions to the nodes themselves; there's a small ?
that you can click for info on what the node does. +This is still a work in progress, like everything else. + +# Installation +1. Clone this repo into the `custom_nodes` folder. +2. Install dependencies: `pip install -r requirements.txt` + or, if you use the portable install, run this in the ComfyUI_windows_portable folder: + + `python_embeded\python.exe -m pip install -r ComfyUI\custom_nodes\ComfyUI-KJNodes\requirements.txt` + + +## Javascript + +### browserstatus.js +Sets the favicon to a green circle when nothing is processing, and to red while processing, also showing the progress percentage and the length of your queue. +Off by default; needs to be enabled from the options, and overrides the Custom-Scripts favicon when enabled. + +## Nodes: + +### Set/Get + +Javascript nodes to set and get constants, reducing unnecessary connection lines. They take in and return anything; purely visual nodes. +The right-click menu of these nodes now has an option to visualize the paths, as well as an option to jump to the corresponding node on the other end. + +**Known limitations**: + - Will not work with any node that dynamically sets its outputs, such as a reroute or another Set/Get node + - Will not work when directly connected to a bypassed node + - Other possible conflicts with JavaScript-based nodes. + +### ColorToMask + +RGB color value to mask; works with batches and AnimateDiff. + +### ConditioningMultiCombine + +Combine any number of conditioning inputs; saves space. + +### ConditioningSetMaskAndCombine + +Mask and combine two sets of conditioning; saves space. + +### GrowMaskWithBlur + +Grows or shrinks (with negative values) a mask, with an option to invert the input; returns the mask and its inverted version. Additionally blurs the mask, which is a slow operation, especially with big batches. A rough sketch of the idea is included at the end of this README. + +### RoundMask + +![image](https://github.com/kijai/ComfyUI-KJNodes/assets/40791699/52c85202-f74e-4b96-9dac-c8bda5ddcc40) + +### WidgetToString +Outputs the value of a widget on any node as a string. +![example of use](docs/images/2024-04-03_20_49_29-ComfyUI.png) + +Enable node ID display from the Manager menu to get the ID of the node you want to read a widget from: +![enable node id display](docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png) + +Use the node ID of the target node, and add the name of the widget to read from: +![use node id and widget name](docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png) + +Recreating or reloading the target node will change its ID, and the WidgetToString node will no longer be able to find it until you update the node ID value with the new one.
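+
+## Sketch: grow and blur (for reference)
+
+A minimal, hypothetical sketch of the grow-and-blur idea behind GrowMaskWithBlur, not the node's actual implementation. It assumes masks are ComfyUI-style float tensors of shape `[B, H, W]` with values in 0..1; the function name and parameters are illustrative only.
+
+```python
+# Hypothetical sketch (not the real node code): grow/shrink a batch of masks
+# with max/min pooling, then apply a separable gaussian blur.
+import torch
+import torch.nn.functional as F
+
+def grow_mask_with_blur(mask: torch.Tensor, expand: int = 0, blur_radius: float = 0.0):
+    out = mask.unsqueeze(1)  # [B, 1, H, W] for the pooling/conv ops
+    if expand > 0:
+        # dilation: max-pool with a kernel sized by the expand amount
+        out = F.max_pool2d(out, kernel_size=2 * expand + 1, stride=1, padding=expand)
+    elif expand < 0:
+        # erosion: min-pool, implemented as max-pool on the inverted mask
+        out = 1.0 - F.max_pool2d(1.0 - out, kernel_size=-2 * expand + 1, stride=1, padding=-expand)
+    if blur_radius > 0:
+        # separable gaussian blur; kernel size derived from the radius
+        size = int(blur_radius * 2) * 2 + 1
+        coords = torch.arange(size, dtype=out.dtype, device=out.device) - size // 2
+        g = torch.exp(-(coords ** 2) / (2 * blur_radius ** 2))
+        g = g / g.sum()
+        out = F.conv2d(out, g.view(1, 1, 1, -1), padding=(0, size // 2))  # horizontal pass
+        out = F.conv2d(out, g.view(1, 1, -1, 1), padding=(size // 2, 0))  # vertical pass
+    out = out.squeeze(1).clamp(0.0, 1.0)
+    return out, 1.0 - out  # mask and inverted mask
+```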
diff --git a/custom_nodes/comfyui-kjnodes/__init__.py b/custom_nodes/comfyui-kjnodes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d3d1325ce6118a0a9468f46de1366da51d9f71b5 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/__init__.py @@ -0,0 +1,245 @@ +from .nodes.nodes import * +from .nodes.curve_nodes import * +from .nodes.batchcrop_nodes import * +from .nodes.audioscheduler_nodes import * +from .nodes.image_nodes import * +from .nodes.intrinsic_lora_nodes import * +from .nodes.mask_nodes import * +from .nodes.model_optimization_nodes import * +from .nodes.lora_nodes import * +NODE_CONFIG = { + #constants + "BOOLConstant": {"class": BOOLConstant, "name": "BOOL Constant"}, + "INTConstant": {"class": INTConstant, "name": "INT Constant"}, + "FloatConstant": {"class": FloatConstant, "name": "Float Constant"}, + "StringConstant": {"class": StringConstant, "name": "String Constant"}, + "StringConstantMultiline": {"class": StringConstantMultiline, "name": "String Constant Multiline"}, + #conditioning + "ConditioningMultiCombine": {"class": ConditioningMultiCombine, "name": "Conditioning Multi Combine"}, + "ConditioningSetMaskAndCombine": {"class": ConditioningSetMaskAndCombine, "name": "ConditioningSetMaskAndCombine"}, + "ConditioningSetMaskAndCombine3": {"class": ConditioningSetMaskAndCombine3, "name": "ConditioningSetMaskAndCombine3"}, + "ConditioningSetMaskAndCombine4": {"class": ConditioningSetMaskAndCombine4, "name": "ConditioningSetMaskAndCombine4"}, + "ConditioningSetMaskAndCombine5": {"class": ConditioningSetMaskAndCombine5, "name": "ConditioningSetMaskAndCombine5"}, + "CondPassThrough": {"class": CondPassThrough}, + #masking + "DownloadAndLoadCLIPSeg": {"class": DownloadAndLoadCLIPSeg, "name": "(Down)load CLIPSeg"}, + "BatchCLIPSeg": {"class": BatchCLIPSeg, "name": "Batch CLIPSeg"}, + "ColorToMask": {"class": ColorToMask, "name": "Color To Mask"}, + "CreateGradientMask": {"class": CreateGradientMask, "name": "Create Gradient Mask"}, + "CreateTextMask": {"class": CreateTextMask, "name": "Create Text Mask"}, + "CreateAudioMask": {"class": CreateAudioMask, "name": "Create Audio Mask"}, + "CreateFadeMask": {"class": CreateFadeMask, "name": "Create Fade Mask"}, + "CreateFadeMaskAdvanced": {"class": CreateFadeMaskAdvanced, "name": "Create Fade Mask Advanced"}, + "CreateFluidMask": {"class": CreateFluidMask, "name": "Create Fluid Mask"}, + "CreateShapeMask": {"class": CreateShapeMask, "name": "Create Shape Mask"}, + "CreateVoronoiMask": {"class": CreateVoronoiMask, "name": "Create Voronoi Mask"}, + "CreateMagicMask": {"class": CreateMagicMask, "name": "Create Magic Mask"}, + "GetMaskSizeAndCount": {"class": GetMaskSizeAndCount, "name": "Get Mask Size & Count"}, + "GrowMaskWithBlur": {"class": GrowMaskWithBlur, "name": "Grow Mask With Blur"}, + "MaskBatchMulti": {"class": MaskBatchMulti, "name": "Mask Batch Multi"}, + "OffsetMask": {"class": OffsetMask, "name": "Offset Mask"}, + "RemapMaskRange": {"class": RemapMaskRange, "name": "Remap Mask Range"}, + "ResizeMask": {"class": ResizeMask, "name": "Resize Mask"}, + "RoundMask": {"class": RoundMask, "name": "Round Mask"}, + "SeparateMasks": {"class": SeparateMasks, "name": "Separate Masks"}, + #images + "AddLabel": {"class": AddLabel, "name": "Add Label"}, + "ColorMatch": {"class": ColorMatch, "name": "Color Match"}, + "ImageTensorList": {"class": ImageTensorList, "name": "Image Tensor List"}, + "CrossFadeImages": {"class": CrossFadeImages, "name": "Cross Fade Images"}, + "CrossFadeImagesMulti": {"class": 
CrossFadeImagesMulti, "name": "Cross Fade Images Multi"}, + "GetImagesFromBatchIndexed": {"class": GetImagesFromBatchIndexed, "name": "Get Images From Batch Indexed"}, + "GetImageRangeFromBatch": {"class": GetImageRangeFromBatch, "name": "Get Image or Mask Range From Batch"}, + "GetLatentRangeFromBatch": {"class": GetLatentRangeFromBatch, "name": "Get Latent Range From Batch"}, + "GetLatentSizeAndCount": {"class": GetLatentSizeAndCount, "name": "Get Latent Size & Count"}, + "GetImageSizeAndCount": {"class": GetImageSizeAndCount, "name": "Get Image Size & Count"}, + "FastPreview": {"class": FastPreview, "name": "Fast Preview"}, + "ImageBatchFilter": {"class": ImageBatchFilter, "name": "Image Batch Filter"}, + "ImageAndMaskPreview": {"class": ImageAndMaskPreview}, + "ImageAddMulti": {"class": ImageAddMulti, "name": "Image Add Multi"}, + "ImageBatchJoinWithTransition": {"class": ImageBatchJoinWithTransition, "name": "Image Batch Join With Transition"}, + "ImageBatchMulti": {"class": ImageBatchMulti, "name": "Image Batch Multi"}, + "ImageBatchRepeatInterleaving": {"class": ImageBatchRepeatInterleaving}, + "ImageBatchTestPattern": {"class": ImageBatchTestPattern, "name": "Image Batch Test Pattern"}, + "ImageConcanate": {"class": ImageConcanate, "name": "Image Concatenate"}, + "ImageConcatFromBatch": {"class": ImageConcatFromBatch, "name": "Image Concatenate From Batch"}, + "ImageConcatMulti": {"class": ImageConcatMulti, "name": "Image Concatenate Multi"}, + "ImageCropByMask": {"class": ImageCropByMask, "name": "Image Crop By Mask"}, + "ImageCropByMaskAndResize": {"class": ImageCropByMaskAndResize, "name": "Image Crop By Mask And Resize"}, + "ImageCropByMaskBatch": {"class": ImageCropByMaskBatch, "name": "Image Crop By Mask Batch"}, + "ImageUncropByMask": {"class": ImageUncropByMask, "name": "Image Uncrop By Mask"}, + "ImageGrabPIL": {"class": ImageGrabPIL, "name": "Image Grab PIL"}, + "ImageGridComposite2x2": {"class": ImageGridComposite2x2, "name": "Image Grid Composite 2x2"}, + "ImageGridComposite3x3": {"class": ImageGridComposite3x3, "name": "Image Grid Composite 3x3"}, + "ImageGridtoBatch": {"class": ImageGridtoBatch, "name": "Image Grid To Batch"}, + "ImageNoiseAugmentation": {"class": ImageNoiseAugmentation, "name": "Image Noise Augmentation"}, + "ImageNormalize_Neg1_To_1": {"class": ImageNormalize_Neg1_To_1, "name": "Image Normalize -1 to 1"}, + "ImagePass": {"class": ImagePass}, + "ImagePadKJ": {"class": ImagePadKJ, "name": "ImagePad KJ"}, + "ImagePadForOutpaintMasked": {"class": ImagePadForOutpaintMasked, "name": "Image Pad For Outpaint Masked"}, + "ImagePadForOutpaintTargetSize": {"class": ImagePadForOutpaintTargetSize, "name": "Image Pad For Outpaint Target Size"}, + "ImagePrepForICLora": {"class": ImagePrepForICLora, "name": "Image Prep For ICLora"}, + "ImageResizeKJ": {"class": ImageResizeKJ, "name": "Resize Image (deprecated)"}, + "ImageResizeKJv2": {"class": ImageResizeKJv2, "name": "Resize Image v2"}, + "ImageUpscaleWithModelBatched": {"class": ImageUpscaleWithModelBatched, "name": "Image Upscale With Model Batched"}, + "InsertImagesToBatchIndexed": {"class": InsertImagesToBatchIndexed, "name": "Insert Images To Batch Indexed"}, + "InsertLatentToIndexed": {"class": InsertLatentToIndex, "name": "Insert Latent To Index"}, + "LoadAndResizeImage": {"class": LoadAndResizeImage, "name": "Load & Resize Image"}, + "LoadImagesFromFolderKJ": {"class": LoadImagesFromFolderKJ, "name": "Load Images From Folder (KJ)"}, + "LoadVideosFromFolder": {"class": LoadVideosFromFolder, "name": "Load 
Videos From Folder"}, + "MergeImageChannels": {"class": MergeImageChannels, "name": "Merge Image Channels"}, + "PadImageBatchInterleaved": {"class": PadImageBatchInterleaved, "name": "Pad Image Batch Interleaved"}, + "PreviewAnimation": {"class": PreviewAnimation, "name": "Preview Animation"}, + "RemapImageRange": {"class": RemapImageRange, "name": "Remap Image Range"}, + "ReverseImageBatch": {"class": ReverseImageBatch, "name": "Reverse Image Batch"}, + "ReplaceImagesInBatch": {"class": ReplaceImagesInBatch, "name": "Replace Images In Batch"}, + "SaveImageWithAlpha": {"class": SaveImageWithAlpha, "name": "Save Image With Alpha"}, + "SaveImageKJ": {"class": SaveImageKJ, "name": "Save Image KJ"}, + "ShuffleImageBatch": {"class": ShuffleImageBatch, "name": "Shuffle Image Batch"}, + "SplitImageChannels": {"class": SplitImageChannels, "name": "Split Image Channels"}, + "TransitionImagesMulti": {"class": TransitionImagesMulti, "name": "Transition Images Multi"}, + "TransitionImagesInBatch": {"class": TransitionImagesInBatch, "name": "Transition Images In Batch"}, + #batch cropping + "BatchCropFromMask": {"class": BatchCropFromMask, "name": "Batch Crop From Mask"}, + "BatchCropFromMaskAdvanced": {"class": BatchCropFromMaskAdvanced, "name": "Batch Crop From Mask Advanced"}, + "FilterZeroMasksAndCorrespondingImages": {"class": FilterZeroMasksAndCorrespondingImages}, + "InsertImageBatchByIndexes": {"class": InsertImageBatchByIndexes, "name": "Insert Image Batch By Indexes"}, + "BatchUncrop": {"class": BatchUncrop, "name": "Batch Uncrop"}, + "BatchUncropAdvanced": {"class": BatchUncropAdvanced, "name": "Batch Uncrop Advanced"}, + "SplitBboxes": {"class": SplitBboxes, "name": "Split Bboxes"}, + "BboxToInt": {"class": BboxToInt, "name": "Bbox To Int"}, + "BboxVisualize": {"class": BboxVisualize, "name": "Bbox Visualize"}, + #noise + "GenerateNoise": {"class": GenerateNoise, "name": "Generate Noise"}, + "FlipSigmasAdjusted": {"class": FlipSigmasAdjusted, "name": "Flip Sigmas Adjusted"}, + "InjectNoiseToLatent": {"class": InjectNoiseToLatent, "name": "Inject Noise To Latent"}, + "CustomSigmas": {"class": CustomSigmas, "name": "Custom Sigmas"}, + #utility + "StringToFloatList": {"class": StringToFloatList, "name": "String to Float List"}, + "WidgetToString": {"class": WidgetToString, "name": "Widget To String"}, + "SaveStringKJ": {"class": SaveStringKJ, "name": "Save String KJ"}, + "DummyOut": {"class": DummyOut, "name": "Dummy Out"}, + "GetLatentsFromBatchIndexed": {"class": GetLatentsFromBatchIndexed, "name": "Get Latents From Batch Indexed"}, + "ScaleBatchPromptSchedule": {"class": ScaleBatchPromptSchedule, "name": "Scale Batch Prompt Schedule"}, + "CameraPoseVisualizer": {"class": CameraPoseVisualizer, "name": "Camera Pose Visualizer"}, + "AppendStringsToList": {"class": AppendStringsToList, "name": "Append Strings To List"}, + "JoinStrings": {"class": JoinStrings, "name": "Join Strings"}, + "JoinStringMulti": {"class": JoinStringMulti, "name": "Join String Multi"}, + "SomethingToString": {"class": SomethingToString, "name": "Something To String"}, + "Sleep": {"class": Sleep, "name": "Sleep"}, + "VRAM_Debug": {"class": VRAM_Debug, "name": "VRAM Debug"}, + "SomethingToString": {"class": SomethingToString, "name": "Something To String"}, + "EmptyLatentImagePresets": {"class": EmptyLatentImagePresets, "name": "Empty Latent Image Presets"}, + "EmptyLatentImageCustomPresets": {"class": EmptyLatentImageCustomPresets, "name": "Empty Latent Image Custom Presets"}, + "ModelPassThrough": {"class": 
ModelPassThrough, "name": "ModelPass"}, + "ModelSaveKJ": {"class": ModelSaveKJ, "name": "Model Save KJ"}, + "SetShakkerLabsUnionControlNetType": {"class": SetShakkerLabsUnionControlNetType, "name": "Set Shakker Labs Union ControlNet Type"}, + "StyleModelApplyAdvanced": {"class": StyleModelApplyAdvanced, "name": "Style Model Apply Advanced"}, + "DiffusionModelSelector": {"class": DiffusionModelSelector, "name": "Diffusion Model Selector"}, + "LazySwitchKJ": {"class": LazySwitchKJ, "name": "Lazy Switch KJ"}, + #audioscheduler stuff + "NormalizedAmplitudeToMask": {"class": NormalizedAmplitudeToMask}, + "NormalizedAmplitudeToFloatList": {"class": NormalizedAmplitudeToFloatList}, + "OffsetMaskByNormalizedAmplitude": {"class": OffsetMaskByNormalizedAmplitude}, + "ImageTransformByNormalizedAmplitude": {"class": ImageTransformByNormalizedAmplitude}, + "AudioConcatenate": {"class": AudioConcatenate}, + #curve nodes + "SplineEditor": {"class": SplineEditor, "name": "Spline Editor"}, + "CreateShapeImageOnPath": {"class": CreateShapeImageOnPath, "name": "Create Shape Image On Path"}, + "CreateShapeMaskOnPath": {"class": CreateShapeMaskOnPath, "name": "Create Shape Mask On Path"}, + "CreateTextOnPath": {"class": CreateTextOnPath, "name": "Create Text On Path"}, + "CreateGradientFromCoords": {"class": CreateGradientFromCoords, "name": "Create Gradient From Coords"}, + "CutAndDragOnPath": {"class": CutAndDragOnPath, "name": "Cut And Drag On Path"}, + "GradientToFloat": {"class": GradientToFloat, "name": "Gradient To Float"}, + "WeightScheduleExtend": {"class": WeightScheduleExtend, "name": "Weight Schedule Extend"}, + "MaskOrImageToWeight": {"class": MaskOrImageToWeight, "name": "Mask Or Image To Weight"}, + "WeightScheduleConvert": {"class": WeightScheduleConvert, "name": "Weight Schedule Convert"}, + "FloatToMask": {"class": FloatToMask, "name": "Float To Mask"}, + "FloatToSigmas": {"class": FloatToSigmas, "name": "Float To Sigmas"}, + "SigmasToFloat": {"class": SigmasToFloat, "name": "Sigmas To Float"}, + "PlotCoordinates": {"class": PlotCoordinates, "name": "Plot Coordinates"}, + "InterpolateCoords": {"class": InterpolateCoords, "name": "Interpolate Coords"}, + "PointsEditor": {"class": PointsEditor, "name": "Points Editor"}, + #experimental + "SoundReactive": {"class": SoundReactive, "name": "Sound Reactive"}, + "StableZero123_BatchSchedule": {"class": StableZero123_BatchSchedule, "name": "Stable Zero123 Batch Schedule"}, + "SV3D_BatchSchedule": {"class": SV3D_BatchSchedule, "name": "SV3D Batch Schedule"}, + "LoadResAdapterNormalization": {"class": LoadResAdapterNormalization}, + "Superprompt": {"class": Superprompt, "name": "Superprompt"}, + "GLIGENTextBoxApplyBatchCoords": {"class": GLIGENTextBoxApplyBatchCoords}, + "Intrinsic_lora_sampling": {"class": Intrinsic_lora_sampling, "name": "Intrinsic Lora Sampling"}, + "CheckpointPerturbWeights": {"class": CheckpointPerturbWeights, "name": "CheckpointPerturbWeights"}, + "Screencap_mss": {"class": Screencap_mss, "name": "Screencap mss"}, + "WebcamCaptureCV2": {"class": WebcamCaptureCV2, "name": "Webcam Capture CV2"}, + "DifferentialDiffusionAdvanced": {"class": DifferentialDiffusionAdvanced, "name": "Differential Diffusion Advanced"}, + "DiTBlockLoraLoader": {"class": DiTBlockLoraLoader, "name": "DiT Block Lora Loader"}, + "FluxBlockLoraSelect": {"class": FluxBlockLoraSelect, "name": "Flux Block Lora Select"}, + "HunyuanVideoBlockLoraSelect": {"class": HunyuanVideoBlockLoraSelect, "name": "Hunyuan Video Block Lora Select"}, + "Wan21BlockLoraSelect": 
{"class": Wan21BlockLoraSelect, "name": "Wan21 Block Lora Select"}, + "CustomControlNetWeightsFluxFromList": {"class": CustomControlNetWeightsFluxFromList, "name": "Custom ControlNet Weights Flux From List"}, + "CheckpointLoaderKJ": {"class": CheckpointLoaderKJ, "name": "CheckpointLoaderKJ"}, + "DiffusionModelLoaderKJ": {"class": DiffusionModelLoaderKJ, "name": "Diffusion Model Loader KJ"}, + "TorchCompileModelFluxAdvanced": {"class": TorchCompileModelFluxAdvanced, "name": "TorchCompileModelFluxAdvanced"}, + "TorchCompileModelFluxAdvancedV2": {"class": TorchCompileModelFluxAdvancedV2, "name": "TorchCompileModelFluxAdvancedV2"}, + "TorchCompileModelHyVideo": {"class": TorchCompileModelHyVideo, "name": "TorchCompileModelHyVideo"}, + "TorchCompileVAE": {"class": TorchCompileVAE, "name": "TorchCompileVAE"}, + "TorchCompileControlNet": {"class": TorchCompileControlNet, "name": "TorchCompileControlNet"}, + "PatchModelPatcherOrder": {"class": PatchModelPatcherOrder, "name": "Patch Model Patcher Order"}, + "TorchCompileLTXModel": {"class": TorchCompileLTXModel, "name": "TorchCompileLTXModel"}, + "TorchCompileCosmosModel": {"class": TorchCompileCosmosModel, "name": "TorchCompileCosmosModel"}, + "TorchCompileModelQwenImage": {"class": TorchCompileModelQwenImage, "name": "TorchCompileModelQwenImage"}, + "TorchCompileModelWanVideo": {"class": TorchCompileModelWanVideo, "name": "TorchCompileModelWanVideo"}, + "TorchCompileModelWanVideoV2": {"class": TorchCompileModelWanVideoV2, "name": "TorchCompileModelWanVideoV2"}, + "PathchSageAttentionKJ": {"class": PathchSageAttentionKJ, "name": "Patch Sage Attention KJ"}, + "LeapfusionHunyuanI2VPatcher": {"class": LeapfusionHunyuanI2V, "name": "Leapfusion Hunyuan I2V Patcher"}, + "VAELoaderKJ": {"class": VAELoaderKJ, "name": "VAELoader KJ"}, + "ScheduledCFGGuidance": {"class": ScheduledCFGGuidance, "name": "Scheduled CFG Guidance"}, + "ApplyRifleXRoPE_HunuyanVideo": {"class": ApplyRifleXRoPE_HunuyanVideo, "name": "Apply RifleXRoPE HunuyanVideo"}, + "ApplyRifleXRoPE_WanVideo": {"class": ApplyRifleXRoPE_WanVideo, "name": "Apply RifleXRoPE WanVideo"}, + "WanVideoTeaCacheKJ": {"class": WanVideoTeaCacheKJ, "name": "WanVideo Tea Cache (native)"}, + "WanVideoEnhanceAVideoKJ": {"class": WanVideoEnhanceAVideoKJ, "name": "WanVideo Enhance A Video (native)"}, + "SkipLayerGuidanceWanVideo": {"class": SkipLayerGuidanceWanVideo, "name": "Skip Layer Guidance WanVideo"}, + "TimerNodeKJ": {"class": TimerNodeKJ, "name": "Timer Node KJ"}, + "HunyuanVideoEncodeKeyframesToCond": {"class": HunyuanVideoEncodeKeyframesToCond, "name": "HunyuanVideo Encode Keyframes To Cond"}, + "CFGZeroStarAndInit": {"class": CFGZeroStarAndInit, "name": "CFG Zero Star/Init"}, + "ModelPatchTorchSettings": {"class": ModelPatchTorchSettings, "name": "Model Patch Torch Settings"}, + "WanVideoNAG": {"class": WanVideoNAG, "name": "WanVideoNAG"}, + + #instance diffusion + "CreateInstanceDiffusionTracking": {"class": CreateInstanceDiffusionTracking}, + "AppendInstanceDiffusionTracking": {"class": AppendInstanceDiffusionTracking}, + "DrawInstanceDiffusionTracking": {"class": DrawInstanceDiffusionTracking}, + + #lora + "LoraExtractKJ": {"class": LoraExtractKJ, "name": "LoraExtractKJ"}, +} + +def generate_node_mappings(node_config): + node_class_mappings = {} + node_display_name_mappings = {} + + for node_name, node_info in node_config.items(): + node_class_mappings[node_name] = node_info["class"] + node_display_name_mappings[node_name] = node_info.get("name", node_info["class"].__name__) + + return 
node_class_mappings, node_display_name_mappings + +NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = generate_node_mappings(NODE_CONFIG) + +__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"] + +WEB_DIRECTORY = "./web" + +from aiohttp import web +from server import PromptServer +from pathlib import Path + +if hasattr(PromptServer, "instance"): + try: + # NOTE: we add an extra static path to avoid comfy mechanism + # that loads every script in web. + PromptServer.instance.app.add_routes( + [web.static("/kjweb_async", (Path(__file__).parent.absolute() / "kjweb_async").as_posix())] + ) + except: + pass \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/custom_dimensions_example.json b/custom_nodes/comfyui-kjnodes/custom_dimensions_example.json new file mode 100644 index 0000000000000000000000000000000000000000..5c4814d377d44916a14b4d4a83b7cba72ae2958b --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/custom_dimensions_example.json @@ -0,0 +1,22 @@ +[ + { + "label": "SD", + "value": "512x512" + }, + { + "label": "HD", + "value": "768x768" + }, + { + "label": "Full HD", + "value": "1024x1024" + }, + { + "label": "4k", + "value": "2048x2048" + }, + { + "label": "SVD", + "value": "1024x576" + } +] diff --git a/custom_nodes/comfyui-kjnodes/docs/images/2024-04-03_20_49_29-ComfyUI.png b/custom_nodes/comfyui-kjnodes/docs/images/2024-04-03_20_49_29-ComfyUI.png new file mode 100644 index 0000000000000000000000000000000000000000..b42cfe56374c6db142326761a2a3d96211519664 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/docs/images/2024-04-03_20_49_29-ComfyUI.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85805d3c7ca8f5d281886ea0ad61f9a78edad755ef8014b3870f91b871807ac9 +size 176158 diff --git a/custom_nodes/comfyui-kjnodes/docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png b/custom_nodes/comfyui-kjnodes/docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png new file mode 100644 index 0000000000000000000000000000000000000000..e749239c1c4ffd5ab29b51695dd8d8b51ed3597f Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png differ diff --git a/custom_nodes/comfyui-kjnodes/docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png b/custom_nodes/comfyui-kjnodes/docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png new file mode 100644 index 0000000000000000000000000000000000000000..b53ad666ff060d87971f3962e74101f0cb2a5c3f Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png differ diff --git a/custom_nodes/comfyui-kjnodes/example_workflows/leapfusion_hunyuuanvideo_i2v_native_testing.json b/custom_nodes/comfyui-kjnodes/example_workflows/leapfusion_hunyuuanvideo_i2v_native_testing.json new file mode 100644 index 0000000000000000000000000000000000000000..134a83788815d04b5574a807db67eb6e45bf9263 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/example_workflows/leapfusion_hunyuuanvideo_i2v_native_testing.json @@ -0,0 +1,1188 @@ +{ + "last_node_id": 86, + "last_link_id": 144, + "nodes": [ + { + "id": 62, + "type": "FluxGuidance", + "pos": [ + -630, + -170 + ], + "size": [ + 317.4000244140625, + 58 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 82 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 83 + ], + "slot_index": 0 + } + ], + "properties": { + 
"Node name for S&R": "FluxGuidance" + }, + "widgets_values": [ + 6 + ] + }, + { + "id": 51, + "type": "KSamplerSelect", + "pos": [ + -610, + -480 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "SAMPLER", + "type": "SAMPLER", + "links": [ + 61 + ] + } + ], + "properties": { + "Node name for S&R": "KSamplerSelect" + }, + "widgets_values": [ + "euler" + ] + }, + { + "id": 57, + "type": "VAEDecodeTiled", + "pos": [ + -200, + 90 + ], + "size": [ + 315, + 150 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 142 + }, + { + "name": "vae", + "type": "VAE", + "link": 74 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 105 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecodeTiled" + }, + "widgets_values": [ + 128, + 64, + 64, + 8 + ] + }, + { + "id": 65, + "type": "LoadImage", + "pos": [ + -2212.498779296875, + -632.4085083007812 + ], + "size": [ + 315, + 314 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 86 + ], + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "Mona-Lisa-oil-wood-panel-Leonardo-da.webp", + "image" + ] + }, + { + "id": 64, + "type": "VAEEncode", + "pos": [ + -1336.7884521484375, + -492.5806884765625 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 144 + }, + { + "name": "vae", + "type": "VAE", + "link": 88 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 137 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEEncode" + }, + "widgets_values": [] + }, + { + "id": 44, + "type": "UNETLoader", + "pos": [ + -2373.55029296875, + -193.91510009765625 + ], + "size": [ + 459.56060791015625, + 82 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 135 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "UNETLoader" + }, + "widgets_values": [ + "hyvideo\\hunyuan_video_720_fp8_e4m3fn.safetensors", + "fp8_e4m3fn_fast" + ] + }, + { + "id": 49, + "type": "VAELoader", + "pos": [ + -1876.39306640625, + -35.19633865356445 + ], + "size": [ + 433.7603454589844, + 58.71116256713867 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 74, + 88 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "hyvid\\hunyuan_video_vae_bf16.safetensors" + ] + }, + { + "id": 47, + "type": "DualCLIPLoader", + "pos": [ + -2284.893798828125, + 150.4042205810547 + ], + "size": [ + 343.3958435058594, + 106.86042785644531 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 56 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "DualCLIPLoader" + }, + "widgets_values": [ + "clip_l.safetensors", + "llava_llama3_fp16.safetensors", + "hunyuan_video", + "default" + ] + }, + { + "id": 45, + "type": "CLIPTextEncode", + "pos": [ + -1839.1649169921875, + 143.5203094482422 + ], + "size": [ + 400, + 200 + ], + "flags": {}, + "order": 8, + 
"mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 56 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 69, + 82 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "woman puts on sunglasses" + ] + }, + { + "id": 53, + "type": "EmptyHunyuanLatentVideo", + "pos": [ + -1120, + 90 + ], + "size": [ + 315, + 130 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 89, + "widget": { + "name": "width" + } + }, + { + "name": "height", + "type": "INT", + "link": 90, + "widget": { + "name": "height" + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 119 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyHunyuanLatentVideo" + }, + "widgets_values": [ + 960, + 544, + 65, + 1 + ] + }, + { + "id": 55, + "type": "ConditioningZeroOut", + "pos": [ + -910, + 300 + ], + "size": [ + 251.14309692382812, + 26 + ], + "flags": { + "collapsed": true + }, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 69 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 70 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 52, + "type": "BasicScheduler", + "pos": [ + -600, + -350 + ], + "size": [ + 315, + 106 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 78 + } + ], + "outputs": [ + { + "name": "SIGMAS", + "type": "SIGMAS", + "links": [ + 62 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BasicScheduler" + }, + "widgets_values": [ + "simple", + 20, + 1 + ] + }, + { + "id": 42, + "type": "SamplerCustom", + "pos": [ + -640, + 10 + ], + "size": [ + 355.20001220703125, + 467.4666748046875 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 77 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 83 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 70 + }, + { + "name": "sampler", + "type": "SAMPLER", + "link": 61 + }, + { + "name": "sigmas", + "type": "SIGMAS", + "link": 62 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 119 + } + ], + "outputs": [ + { + "name": "output", + "type": "LATENT", + "links": null + }, + { + "name": "denoised_output", + "type": "LATENT", + "links": [ + 141 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "SamplerCustom" + }, + "widgets_values": [ + true, + 6, + "fixed", + 1, + null + ] + }, + { + "id": 84, + "type": "GetLatentRangeFromBatch", + "pos": [ + -240, + -100 + ], + "size": [ + 340.20001220703125, + 82 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "latents", + "type": "LATENT", + "link": 141 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 142 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "GetLatentRangeFromBatch" + }, + "widgets_values": [ + 1, + -1 + ] + }, + { + "id": 50, + "type": "VHS_VideoCombine", + "pos": [ + 165.77645874023438, + -619.0606079101562 + ], + "size": [ + 1112.6898193359375, + 1076.4598388671875 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 105 + }, + { + 
"name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 24, + "loop_count": 0, + "filename_prefix": "hyvidcomfy", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "trim_to_audio": false, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "hyvidcomfy_00001.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 24, + "workflow": "hyvidcomfy_00001.png", + "fullpath": "N:\\AI\\ComfyUI\\temp\\hyvidcomfy_00001.mp4" + }, + "muted": false + } + } + }, + { + "id": 54, + "type": "ModelSamplingSD3", + "pos": [ + -1079.9112548828125, + -146.69448852539062 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 117 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 77, + 78 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ModelSamplingSD3" + }, + "widgets_values": [ + 9 + ] + }, + { + "id": 80, + "type": "PathchSageAttentionKJ", + "pos": [ + -2273.926513671875, + -36.720542907714844 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 7, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 135 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 136 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "PathchSageAttentionKJ" + }, + "widgets_values": [ + "auto" + ] + }, + { + "id": 85, + "type": "Note", + "pos": [ + -1838.572265625, + -302.1575927734375 + ], + "size": [ + 408.4594421386719, + 58 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [], + "outputs": [], + "properties": {}, + "widgets_values": [ + "https://huggingface.co/Kijai/Leapfusion-image2vid-comfy/blob/main/leapfusion_img2vid544p_comfy.safetensors" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 74, + "type": "LeapfusionHunyuanI2VPatcher", + "pos": [ + -1059.552978515625, + -459.34674072265625 + ], + "size": [ + 277.3238525390625, + 150 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 123 + }, + { + "name": "latent", + "type": "LATENT", + "link": 137 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 117 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "LeapfusionHunyuanI2VPatcher" + }, + "widgets_values": [ + 0, + 0, + 1, + 0.8 + ] + }, + { + "id": 59, + "type": "LoraLoaderModelOnly", + "pos": [ + -1870.3748779296875, + -194.6091766357422 + ], + "size": [ + 442.8438720703125, + 82 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 136 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 123 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "LoraLoaderModelOnly" + }, + "widgets_values": [ + "hyvid\\musubi-tuner\\img2vid544p.safetensors", + 1 + ] + }, + { + "id": 66, + "type": "ImageResizeKJ", + "pos": [ + -1821.1531982421875, + 
-632.925048828125 + ], + "size": [ + 315, + 266 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "name": "get_image_size", + "type": "IMAGE", + "link": null, + "shape": 7 + }, + { + "name": "width_input", + "type": "INT", + "link": null, + "widget": { + "name": "width_input" + }, + "shape": 7 + }, + { + "name": "height_input", + "type": "INT", + "link": null, + "widget": { + "name": "height_input" + }, + "shape": 7 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 143 + ], + "slot_index": 0 + }, + { + "name": "width", + "type": "INT", + "links": [ + 89 + ], + "slot_index": 1 + }, + { + "name": "height", + "type": "INT", + "links": [ + 90 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "ImageResizeKJ" + }, + "widgets_values": [ + 960, + 640, + "lanczos", + false, + 2, + 0, + 0, + "center" + ] + }, + { + "id": 86, + "type": "ImageNoiseAugmentation", + "pos": [ + -1361.111572265625, + -667.0104370117188 + ], + "size": [ + 315, + 106 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 143 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 144 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageNoiseAugmentation" + }, + "widgets_values": [ + 0.05, + 123, + "fixed" + ] + } + ], + "links": [ + [ + 56, + 47, + 0, + 45, + 0, + "CLIP" + ], + [ + 61, + 51, + 0, + 42, + 3, + "SAMPLER" + ], + [ + 62, + 52, + 0, + 42, + 4, + "SIGMAS" + ], + [ + 69, + 45, + 0, + 55, + 0, + "CONDITIONING" + ], + [ + 70, + 55, + 0, + 42, + 2, + "CONDITIONING" + ], + [ + 74, + 49, + 0, + 57, + 1, + "VAE" + ], + [ + 77, + 54, + 0, + 42, + 0, + "MODEL" + ], + [ + 78, + 54, + 0, + 52, + 0, + "MODEL" + ], + [ + 82, + 45, + 0, + 62, + 0, + "CONDITIONING" + ], + [ + 83, + 62, + 0, + 42, + 1, + "CONDITIONING" + ], + [ + 86, + 65, + 0, + 66, + 0, + "IMAGE" + ], + [ + 88, + 49, + 0, + 64, + 1, + "VAE" + ], + [ + 89, + 66, + 1, + 53, + 0, + "INT" + ], + [ + 90, + 66, + 2, + 53, + 1, + "INT" + ], + [ + 105, + 57, + 0, + 50, + 0, + "IMAGE" + ], + [ + 117, + 74, + 0, + 54, + 0, + "MODEL" + ], + [ + 119, + 53, + 0, + 42, + 5, + "LATENT" + ], + [ + 123, + 59, + 0, + 74, + 0, + "MODEL" + ], + [ + 135, + 44, + 0, + 80, + 0, + "MODEL" + ], + [ + 136, + 80, + 0, + 59, + 0, + "MODEL" + ], + [ + 137, + 64, + 0, + 74, + 1, + "LATENT" + ], + [ + 141, + 42, + 1, + 84, + 0, + "LATENT" + ], + [ + 142, + 84, + 0, + 57, + 0, + "LATENT" + ], + [ + 143, + 66, + 0, + 86, + 0, + "IMAGE" + ], + [ + 144, + 86, + 0, + 64, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": { + "ds": { + "scale": 0.740024994425854, + "offset": [ + 2525.036093151529, + 802.59123935694 + ] + }, + "node_versions": { + "comfy-core": "0.3.13", + "ComfyUI-KJNodes": "a8aeef670b3f288303f956bf94385cb87978ea93", + "ComfyUI-VideoHelperSuite": "c47b10ca1798b4925ff5a5f07d80c51ca80a837d" + }, + "VHS_latentpreview": true, + "VHS_latentpreviewrate": 0 + }, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/fonts/FreeMono.ttf b/custom_nodes/comfyui-kjnodes/fonts/FreeMono.ttf new file mode 100644 index 0000000000000000000000000000000000000000..1e35d08261b9a61f87b6f1e7393d5c1221828ed1 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/fonts/FreeMono.ttf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7c692ad545c308b7b8fc2db770760c4a5d15ca50f12addf58c8f5360370e831 +size 
343980 diff --git a/custom_nodes/comfyui-kjnodes/fonts/FreeMonoBoldOblique.otf b/custom_nodes/comfyui-kjnodes/fonts/FreeMonoBoldOblique.otf new file mode 100644 index 0000000000000000000000000000000000000000..3aafa30ff9d9f31df006993f3c3d5c2eb5953f4f --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/fonts/FreeMonoBoldOblique.otf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96187651ee033d0d9791dc2beeebfba5d1f070ab410fce1a5c16483ca249c588 +size 237600 diff --git a/custom_nodes/comfyui-kjnodes/fonts/TTNorms-Black.otf b/custom_nodes/comfyui-kjnodes/fonts/TTNorms-Black.otf new file mode 100644 index 0000000000000000000000000000000000000000..2cd91a4fc9637af834c5e2793ab6487c5067063d --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/fonts/TTNorms-Black.otf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:710977e683bf0db6416d6d41b427e0363c914e6c503a5291fcb330f30b8448ea +size 152736 diff --git a/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors b/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3d84a8b75363549dff202eb2f2353e63d5245a04 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d897f04ff2bb452e29a8f2a3c5c3cd5c55e95f314242cd645fbbe24a5ac59961 +size 6416109 diff --git a/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_depth.safetensors b/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_depth.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6048b84f9e5348240d84d1c0d24e96c9655032e2 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_depth.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f199d6bf3180fe7271073c3769dcb764b40f35f41b30fcb183ae5bf4b6a9997f +size 6416109 diff --git a/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_normal.safetensors b/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_normal.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..506b1dd0a3b9a07c423f6cda497fa6a196014c18 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_normal.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02934db0a0b92a9cdda402e42548560beda7d31b268e561dbc6815551e876268 +size 6416109 diff --git a/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_shading.safetensors b/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_shading.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5b8bbfcf7926ac3ecefe84229ca6de2fc1b523eb --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_lora_sd15_shading.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:635e998063a10211633edd3e4b1676201822cd67f790ec71dba5f32d8b625c8b +size 6416109 diff --git a/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_loras.txt b/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_loras.txt new file mode 100644 index 0000000000000000000000000000000000000000..62ee933763a8aa9e1b232d228717ac754ab22751 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/intrinsic_loras/intrinsic_loras.txt @@ -0,0 +1,4 @@ +source for the loras: +https://github.com/duxiaodan/intrinsic-lora + 
+Renamed and conveted to .safetensors \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/kjweb_async/marked.min.js b/custom_nodes/comfyui-kjnodes/kjweb_async/marked.min.js new file mode 100644 index 0000000000000000000000000000000000000000..2e66c369c388c135cc68d399861a737f4c5e68cd --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/kjweb_async/marked.min.js @@ -0,0 +1,6 @@ +/** + * marked v12.0.1 - a markdown parser + * Copyright (c) 2011-2024, Christopher Jeffrey. (MIT Licensed) + * https://github.com/markedjs/marked + */ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self).marked={})}(this,(function(e){"use strict";function t(){return{async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null}}function n(t){e.defaults=t}e.defaults={async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null};const s=/[&<>"']/,r=new RegExp(s.source,"g"),i=/[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/,l=new RegExp(i.source,"g"),o={"&":"&","<":"<",">":">",'"':""","'":"'"},a=e=>o[e];function c(e,t){if(t){if(s.test(e))return e.replace(r,a)}else if(i.test(e))return e.replace(l,a);return e}const h=/&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/gi;function p(e){return e.replace(h,((e,t)=>"colon"===(t=t.toLowerCase())?":":"#"===t.charAt(0)?"x"===t.charAt(1)?String.fromCharCode(parseInt(t.substring(2),16)):String.fromCharCode(+t.substring(1)):""))}const u=/(^|[^\[])\^/g;function k(e,t){let n="string"==typeof e?e:e.source;t=t||"";const s={replace:(e,t)=>{let r="string"==typeof t?t:t.source;return r=r.replace(u,"$1"),n=n.replace(e,r),s},getRegex:()=>new RegExp(n,t)};return s}function g(e){try{e=encodeURI(e).replace(/%25/g,"%")}catch(e){return null}return e}const f={exec:()=>null};function d(e,t){const n=e.replace(/\|/g,((e,t,n)=>{let s=!1,r=t;for(;--r>=0&&"\\"===n[r];)s=!s;return s?"|":" |"})).split(/ \|/);let s=0;if(n[0].trim()||n.shift(),n.length>0&&!n[n.length-1].trim()&&n.pop(),t)if(n.length>t)n.splice(t);else for(;n.length0)return{type:"space",raw:t[0]}}code(e){const t=this.rules.block.code.exec(e);if(t){const e=t[0].replace(/^ {1,4}/gm,"");return{type:"code",raw:t[0],codeBlockStyle:"indented",text:this.options.pedantic?e:x(e,"\n")}}}fences(e){const t=this.rules.block.fences.exec(e);if(t){const e=t[0],n=function(e,t){const n=e.match(/^(\s+)(?:```)/);if(null===n)return t;const s=n[1];return t.split("\n").map((e=>{const t=e.match(/^\s+/);if(null===t)return e;const[n]=t;return n.length>=s.length?e.slice(s.length):e})).join("\n")}(e,t[3]||"");return{type:"code",raw:e,lang:t[2]?t[2].trim().replace(this.rules.inline.anyPunctuation,"$1"):t[2],text:n}}}heading(e){const t=this.rules.block.heading.exec(e);if(t){let e=t[2].trim();if(/#$/.test(e)){const t=x(e,"#");this.options.pedantic?e=t.trim():t&&!/ $/.test(t)||(e=t.trim())}return{type:"heading",raw:t[0],depth:t[1].length,text:e,tokens:this.lexer.inline(e)}}}hr(e){const t=this.rules.block.hr.exec(e);if(t)return{type:"hr",raw:t[0]}}blockquote(e){const t=this.rules.block.blockquote.exec(e);if(t){const e=x(t[0].replace(/^ *>[ \t]?/gm,""),"\n"),n=this.lexer.state.top;this.lexer.state.top=!0;const s=this.lexer.blockTokens(e);return this.lexer.state.top=n,{type:"blockquote",raw:t[0],tokens:s,text:e}}}list(e){let t=this.rules.block.list.exec(e);if(t){let n=t[1].trim();const 
s=n.length>1,r={type:"list",raw:"",ordered:s,start:s?+n.slice(0,-1):"",loose:!1,items:[]};n=s?`\\d{1,9}\\${n.slice(-1)}`:`\\${n}`,this.options.pedantic&&(n=s?n:"[*+-]");const i=new RegExp(`^( {0,3}${n})((?:[\t ][^\\n]*)?(?:\\n|$))`);let l="",o="",a=!1;for(;e;){let n=!1;if(!(t=i.exec(e)))break;if(this.rules.block.hr.test(e))break;l=t[0],e=e.substring(l.length);let s=t[2].split("\n",1)[0].replace(/^\t+/,(e=>" ".repeat(3*e.length))),c=e.split("\n",1)[0],h=0;this.options.pedantic?(h=2,o=s.trimStart()):(h=t[2].search(/[^ ]/),h=h>4?1:h,o=s.slice(h),h+=t[1].length);let p=!1;if(!s&&/^ *$/.test(c)&&(l+=c+"\n",e=e.substring(c.length+1),n=!0),!n){const t=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:[*+-]|\\d{1,9}[.)])((?:[ \t][^\\n]*)?(?:\\n|$))`),n=new RegExp(`^ {0,${Math.min(3,h-1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`),r=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:\`\`\`|~~~)`),i=new RegExp(`^ {0,${Math.min(3,h-1)}}#`);for(;e;){const a=e.split("\n",1)[0];if(c=a,this.options.pedantic&&(c=c.replace(/^ {1,4}(?=( {4})*[^ ])/g," ")),r.test(c))break;if(i.test(c))break;if(t.test(c))break;if(n.test(e))break;if(c.search(/[^ ]/)>=h||!c.trim())o+="\n"+c.slice(h);else{if(p)break;if(s.search(/[^ ]/)>=4)break;if(r.test(s))break;if(i.test(s))break;if(n.test(s))break;o+="\n"+c}p||c.trim()||(p=!0),l+=a+"\n",e=e.substring(a.length+1),s=c.slice(h)}}r.loose||(a?r.loose=!0:/\n *\n *$/.test(l)&&(a=!0));let u,k=null;this.options.gfm&&(k=/^\[[ xX]\] /.exec(o),k&&(u="[ ] "!==k[0],o=o.replace(/^\[[ xX]\] +/,""))),r.items.push({type:"list_item",raw:l,task:!!k,checked:u,loose:!1,text:o,tokens:[]}),r.raw+=l}r.items[r.items.length-1].raw=l.trimEnd(),r.items[r.items.length-1].text=o.trimEnd(),r.raw=r.raw.trimEnd();for(let e=0;e"space"===e.type)),n=t.length>0&&t.some((e=>/\n.*\n/.test(e.raw)));r.loose=n}if(r.loose)for(let e=0;e$/,"$1").replace(this.rules.inline.anyPunctuation,"$1"):"",s=t[3]?t[3].substring(1,t[3].length-1).replace(this.rules.inline.anyPunctuation,"$1"):t[3];return{type:"def",tag:e,raw:t[0],href:n,title:s}}}table(e){const t=this.rules.block.table.exec(e);if(!t)return;if(!/[:|]/.test(t[2]))return;const n=d(t[1]),s=t[2].replace(/^\||\| *$/g,"").split("|"),r=t[3]&&t[3].trim()?t[3].replace(/\n[ \t]*$/,"").split("\n"):[],i={type:"table",raw:t[0],header:[],align:[],rows:[]};if(n.length===s.length){for(const e of s)/^ *-+: *$/.test(e)?i.align.push("right"):/^ *:-+: *$/.test(e)?i.align.push("center"):/^ *:-+ *$/.test(e)?i.align.push("left"):i.align.push(null);for(const e of n)i.header.push({text:e,tokens:this.lexer.inline(e)});for(const e of r)i.rows.push(d(e,i.header.length).map((e=>({text:e,tokens:this.lexer.inline(e)}))));return i}}lheading(e){const t=this.rules.block.lheading.exec(e);if(t)return{type:"heading",raw:t[0],depth:"="===t[2].charAt(0)?1:2,text:t[1],tokens:this.lexer.inline(t[1])}}paragraph(e){const t=this.rules.block.paragraph.exec(e);if(t){const e="\n"===t[1].charAt(t[1].length-1)?t[1].slice(0,-1):t[1];return{type:"paragraph",raw:t[0],text:e,tokens:this.lexer.inline(e)}}}text(e){const t=this.rules.block.text.exec(e);if(t)return{type:"text",raw:t[0],text:t[0],tokens:this.lexer.inline(t[0])}}escape(e){const t=this.rules.inline.escape.exec(e);if(t)return{type:"escape",raw:t[0],text:c(t[1])}}tag(e){const 
t=this.rules.inline.tag.exec(e);if(t)return!this.lexer.state.inLink&&/^/i.test(t[0])&&(this.lexer.state.inLink=!1),!this.lexer.state.inRawBlock&&/^<(pre|code|kbd|script)(\s|>)/i.test(t[0])?this.lexer.state.inRawBlock=!0:this.lexer.state.inRawBlock&&/^<\/(pre|code|kbd|script)(\s|>)/i.test(t[0])&&(this.lexer.state.inRawBlock=!1),{type:"html",raw:t[0],inLink:this.lexer.state.inLink,inRawBlock:this.lexer.state.inRawBlock,block:!1,text:t[0]}}link(e){const t=this.rules.inline.link.exec(e);if(t){const e=t[2].trim();if(!this.options.pedantic&&/^$/.test(e))return;const t=x(e.slice(0,-1),"\\");if((e.length-t.length)%2==0)return}else{const e=function(e,t){if(-1===e.indexOf(t[1]))return-1;let n=0;for(let s=0;s-1){const n=(0===t[0].indexOf("!")?5:4)+t[1].length+e;t[2]=t[2].substring(0,e),t[0]=t[0].substring(0,n).trim(),t[3]=""}}let n=t[2],s="";if(this.options.pedantic){const e=/^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(n);e&&(n=e[1],s=e[3])}else s=t[3]?t[3].slice(1,-1):"";return n=n.trim(),/^$/.test(e)?n.slice(1):n.slice(1,-1)),b(t,{href:n?n.replace(this.rules.inline.anyPunctuation,"$1"):n,title:s?s.replace(this.rules.inline.anyPunctuation,"$1"):s},t[0],this.lexer)}}reflink(e,t){let n;if((n=this.rules.inline.reflink.exec(e))||(n=this.rules.inline.nolink.exec(e))){const e=t[(n[2]||n[1]).replace(/\s+/g," ").toLowerCase()];if(!e){const e=n[0].charAt(0);return{type:"text",raw:e,text:e}}return b(n,e,n[0],this.lexer)}}emStrong(e,t,n=""){let s=this.rules.inline.emStrongLDelim.exec(e);if(!s)return;if(s[3]&&n.match(/[\p{L}\p{N}]/u))return;if(!(s[1]||s[2]||"")||!n||this.rules.inline.punctuation.exec(n)){const n=[...s[0]].length-1;let r,i,l=n,o=0;const a="*"===s[0][0]?this.rules.inline.emStrongRDelimAst:this.rules.inline.emStrongRDelimUnd;for(a.lastIndex=0,t=t.slice(-1*e.length+n);null!=(s=a.exec(t));){if(r=s[1]||s[2]||s[3]||s[4]||s[5]||s[6],!r)continue;if(i=[...r].length,s[3]||s[4]){l+=i;continue}if((s[5]||s[6])&&n%3&&!((n+i)%3)){o+=i;continue}if(l-=i,l>0)continue;i=Math.min(i,i+l+o);const t=[...s[0]][0].length,a=e.slice(0,n+s.index+t+i);if(Math.min(n,i)%2){const e=a.slice(1,-1);return{type:"em",raw:a,text:e,tokens:this.lexer.inlineTokens(e)}}const c=a.slice(2,-2);return{type:"strong",raw:a,text:c,tokens:this.lexer.inlineTokens(c)}}}}codespan(e){const t=this.rules.inline.code.exec(e);if(t){let e=t[2].replace(/\n/g," ");const n=/[^ ]/.test(e),s=/^ /.test(e)&&/ $/.test(e);return n&&s&&(e=e.substring(1,e.length-1)),e=c(e,!0),{type:"codespan",raw:t[0],text:e}}}br(e){const t=this.rules.inline.br.exec(e);if(t)return{type:"br",raw:t[0]}}del(e){const t=this.rules.inline.del.exec(e);if(t)return{type:"del",raw:t[0],text:t[2],tokens:this.lexer.inlineTokens(t[2])}}autolink(e){const t=this.rules.inline.autolink.exec(e);if(t){let e,n;return"@"===t[2]?(e=c(t[1]),n="mailto:"+e):(e=c(t[1]),n=e),{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}url(e){let t;if(t=this.rules.inline.url.exec(e)){let e,n;if("@"===t[2])e=c(t[0]),n="mailto:"+e;else{let s;do{s=t[0],t[0]=this.rules.inline._backpedal.exec(t[0])?.[0]??""}while(s!==t[0]);e=c(t[0]),n="www."===t[1]?"http://"+t[0]:t[0]}return{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}inlineText(e){const t=this.rules.inline.text.exec(e);if(t){let e;return e=this.lexer.state.inRawBlock?t[0]:c(t[0]),{type:"text",raw:t[0],text:e}}}}const m=/^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,y=/(?:[*+-]|\d{1,9}[.)])/,$=k(/^(?!bull |blockCode|fences|blockquote|heading|html)((?:.|\n(?!\s*?\n|bull 
|blockCode|fences|blockquote|heading|html))+?)\n {0,3}(=+|-+) *(?:\n+|$)/).replace(/bull/g,y).replace(/blockCode/g,/ {4}/).replace(/fences/g,/ {0,3}(?:`{3,}|~{3,})/).replace(/blockquote/g,/ {0,3}>/).replace(/heading/g,/ {0,3}#{1,6}/).replace(/html/g,/ {0,3}<[^\n>]+>\n/).getRegex(),z=/^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/,T=/(?!\s*\])(?:\\.|[^\[\]\\])+/,R=k(/^ {0,3}\[(label)\]: *(?:\n *)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n *)?| *\n *)(title))? *(?:\n+|$)/).replace("label",T).replace("title",/(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/).getRegex(),_=k(/^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/).replace(/bull/g,y).getRegex(),A="address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|search|section|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul",S=/|$))/,I=k("^ {0,3}(?:<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:[^\\n]*\\n+|$)|comment[^\\n]*(\\n+|$)|<\\?[\\s\\S]*?(?:\\?>\\n*|$)|\\n*|$)|\\n*|$)|)[\\s\\S]*?(?:(?:\\n *)+\\n|$)|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)|(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$))","i").replace("comment",S).replace("tag",A).replace("attribute",/ +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex(),E=k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("|table","").replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),q={blockquote:k(/^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/).replace("paragraph",E).getRegex(),code:/^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,def:R,fences:/^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/,heading:/^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,hr:m,html:I,lheading:$,list:_,newline:/^(?: *(?:\n|$))+/,paragraph:E,table:f,text:/^[^\n]+/},Z=k("^ *([^\\n ].*)\\n {0,3}((?:\\| *)?:?-+:? *(?:\\| *:?-+:? *)*(?:\\| *)?)(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)").replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("blockquote"," {0,3}>").replace("code"," {4}[^\\n]").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),L={...q,table:Z,paragraph:k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("table",Z).replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex()},P={...q,html:k("^ *(?:comment *(?:\\n|\\s*$)|<(tag)[\\s\\S]+? *(?:\\n{2,}|\\s*$)|\\s]*)*?/?> *(?:\\n{2,}|\\s*$))").replace("comment",S).replace(/tag/g,"(?!(?:a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)\\b)\\w+(?!:|[^\\w\\s@]*@)\\b").getRegex(),def:/^ *\[([^\]]+)\]: *]+)>?(?: +(["(][^\n]+[")]))? 
*(?:\n+|$)/,heading:/^(#{1,6})(.*)(?:\n+|$)/,fences:f,lheading:/^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/,paragraph:k(z).replace("hr",m).replace("heading"," *#{1,6} *[^\n]").replace("lheading",$).replace("|table","").replace("blockquote"," {0,3}>").replace("|fences","").replace("|list","").replace("|html","").replace("|tag","").getRegex()},Q=/^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,v=/^( {2,}|\\)\n(?!\s*$)/,B="\\p{P}\\p{S}",C=k(/^((?![*_])[\spunctuation])/,"u").replace(/punctuation/g,B).getRegex(),M=k(/^(?:\*+(?:((?!\*)[punct])|[^\s*]))|^_+(?:((?!_)[punct])|([^\s_]))/,"u").replace(/punct/g,B).getRegex(),O=k("^[^_*]*?__[^_*]*?\\*[^_*]*?(?=__)|[^*]+(?=[^*])|(?!\\*)[punct](\\*+)(?=[\\s]|$)|[^punct\\s](\\*+)(?!\\*)(?=[punct\\s]|$)|(?!\\*)[punct\\s](\\*+)(?=[^punct\\s])|[\\s](\\*+)(?!\\*)(?=[punct])|(?!\\*)[punct](\\*+)(?!\\*)(?=[punct])|[^punct\\s](\\*+)(?=[^punct\\s])","gu").replace(/punct/g,B).getRegex(),D=k("^[^_*]*?\\*\\*[^_*]*?_[^_*]*?(?=\\*\\*)|[^_]+(?=[^_])|(?!_)[punct](_+)(?=[\\s]|$)|[^punct\\s](_+)(?!_)(?=[punct\\s]|$)|(?!_)[punct\\s](_+)(?=[^punct\\s])|[\\s](_+)(?!_)(?=[punct])|(?!_)[punct](_+)(?!_)(?=[punct])","gu").replace(/punct/g,B).getRegex(),j=k(/\\([punct])/,"gu").replace(/punct/g,B).getRegex(),H=k(/^<(scheme:[^\s\x00-\x1f<>]*|email)>/).replace("scheme",/[a-zA-Z][a-zA-Z0-9+.-]{1,31}/).replace("email",/[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/).getRegex(),U=k(S).replace("(?:--\x3e|$)","--\x3e").getRegex(),X=k("^comment|^|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>|^<\\?[\\s\\S]*?\\?>|^|^").replace("comment",U).replace("attribute",/\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/).getRegex(),F=/(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/,N=k(/^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/).replace("label",F).replace("href",/<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/).replace("title",/"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/).getRegex(),G=k(/^!?\[(label)\]\[(ref)\]/).replace("label",F).replace("ref",T).getRegex(),J=k(/^!?\[(ref)\](?:\[\])?/).replace("ref",T).getRegex(),K={_backpedal:f,anyPunctuation:j,autolink:H,blockSkip:/\[[^[\]]*?\]\([^\(\)]*?\)|`[^`]*?`|<[^<>]*?>/g,br:v,code:/^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,del:f,emStrongLDelim:M,emStrongRDelimAst:O,emStrongRDelimUnd:D,escape:Q,link:N,nolink:J,punctuation:C,reflink:G,reflinkSearch:k("reflink|nolink(?!\\()","g").replace("reflink",G).replace("nolink",J).getRegex(),tag:X,text:/^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\t+" ".repeat(n.length)));e;)if(!(this.options.extensions&&this.options.extensions.block&&this.options.extensions.block.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.space(e))e=e.substring(n.raw.length),1===n.raw.length&&t.length>0?t[t.length-1].raw+="\n":t.push(n);else if(n=this.tokenizer.code(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?t.push(n):(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.fences(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.heading(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.hr(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.blockquote(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.list(e))e=e.substring(n.raw.length),t.push(n);else 
if(n=this.tokenizer.html(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.def(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?this.tokens.links[n.tag]||(this.tokens.links[n.tag]={href:n.href,title:n.title}):(s.raw+="\n"+n.raw,s.text+="\n"+n.raw,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.table(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.lheading(e))e=e.substring(n.raw.length),t.push(n);else{if(r=e,this.options.extensions&&this.options.extensions.startBlock){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startBlock.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(this.state.top&&(n=this.tokenizer.paragraph(r)))s=t[t.length-1],i&&"paragraph"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n),i=r.length!==e.length,e=e.substring(n.raw.length);else if(n=this.tokenizer.text(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}return this.state.top=!0,t}inline(e,t=[]){return this.inlineQueue.push({src:e,tokens:t}),t}inlineTokens(e,t=[]){let n,s,r,i,l,o,a=e;if(this.tokens.links){const e=Object.keys(this.tokens.links);if(e.length>0)for(;null!=(i=this.tokenizer.rules.inline.reflinkSearch.exec(a));)e.includes(i[0].slice(i[0].lastIndexOf("[")+1,-1))&&(a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex))}for(;null!=(i=this.tokenizer.rules.inline.blockSkip.exec(a));)a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);for(;null!=(i=this.tokenizer.rules.inline.anyPunctuation.exec(a));)a=a.slice(0,i.index)+"++"+a.slice(this.tokenizer.rules.inline.anyPunctuation.lastIndex);for(;e;)if(l||(o=""),l=!1,!(this.options.extensions&&this.options.extensions.inline&&this.options.extensions.inline.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.escape(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.tag(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.link(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.reflink(e,this.tokens.links))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.emStrong(e,a,o))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.codespan(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.br(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.del(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.autolink(e))e=e.substring(n.raw.length),t.push(n);else if(this.state.inLink||!(n=this.tokenizer.url(e))){if(r=e,this.options.extensions&&this.options.extensions.startInline){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startInline.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof 
s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(n=this.tokenizer.inlineText(r))e=e.substring(n.raw.length),"_"!==n.raw.slice(-1)&&(o=n.raw.slice(-1)),l=!0,s=t[t.length-1],s&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}else e=e.substring(n.raw.length),t.push(n);return t}}class se{options;constructor(t){this.options=t||e.defaults}code(e,t,n){const s=(t||"").match(/^\S*/)?.[0];return e=e.replace(/\n$/,"")+"\n",s?'
'+(n?e:c(e,!0))+"
\n":"
"+(n?e:c(e,!0))+"
\n"}blockquote(e){return`
\n${e}
\n`}html(e,t){return e}heading(e,t,n){return`${e}\n`}hr(){return"
\n"}list(e,t,n){const s=t?"ol":"ul";return"<"+s+(t&&1!==n?' start="'+n+'"':"")+">\n"+e+"\n"}listitem(e,t,n){return`
  • ${e}
  • \n`}checkbox(e){return"'}paragraph(e){return`

    ${e}

    \n`}table(e,t){return t&&(t=`${t}`),"\n\n"+e+"\n"+t+"
    \n"}tablerow(e){return`\n${e}\n`}tablecell(e,t){const n=t.header?"th":"td";return(t.align?`<${n} align="${t.align}">`:`<${n}>`)+e+`\n`}strong(e){return`${e}`}em(e){return`${e}`}codespan(e){return`${e}`}br(){return"
    "}del(e){return`${e}`}link(e,t,n){const s=g(e);if(null===s)return n;let r='
    ",r}image(e,t,n){const s=g(e);if(null===s)return n;let r=`${n}0&&"paragraph"===n.tokens[0].type?(n.tokens[0].text=e+" "+n.tokens[0].text,n.tokens[0].tokens&&n.tokens[0].tokens.length>0&&"text"===n.tokens[0].tokens[0].type&&(n.tokens[0].tokens[0].text=e+" "+n.tokens[0].tokens[0].text)):n.tokens.unshift({type:"text",text:e+" "}):o+=e+" "}o+=this.parse(n.tokens,i),l+=this.renderer.listitem(o,r,!!s)}n+=this.renderer.list(l,t,s);continue}case"html":{const e=r;n+=this.renderer.html(e.text,e.block);continue}case"paragraph":{const e=r;n+=this.renderer.paragraph(this.parseInline(e.tokens));continue}case"text":{let i=r,l=i.tokens?this.parseInline(i.tokens):i.text;for(;s+1{const r=e[s].flat(1/0);n=n.concat(this.walkTokens(r,t))})):e.tokens&&(n=n.concat(this.walkTokens(e.tokens,t)))}}return n}use(...e){const t=this.defaults.extensions||{renderers:{},childTokens:{}};return e.forEach((e=>{const n={...e};if(n.async=this.defaults.async||n.async||!1,e.extensions&&(e.extensions.forEach((e=>{if(!e.name)throw new Error("extension name required");if("renderer"in e){const n=t.renderers[e.name];t.renderers[e.name]=n?function(...t){let s=e.renderer.apply(this,t);return!1===s&&(s=n.apply(this,t)),s}:e.renderer}if("tokenizer"in e){if(!e.level||"block"!==e.level&&"inline"!==e.level)throw new Error("extension level must be 'block' or 'inline'");const n=t[e.level];n?n.unshift(e.tokenizer):t[e.level]=[e.tokenizer],e.start&&("block"===e.level?t.startBlock?t.startBlock.push(e.start):t.startBlock=[e.start]:"inline"===e.level&&(t.startInline?t.startInline.push(e.start):t.startInline=[e.start]))}"childTokens"in e&&e.childTokens&&(t.childTokens[e.name]=e.childTokens)})),n.extensions=t),e.renderer){const t=this.defaults.renderer||new se(this.defaults);for(const n in e.renderer){if(!(n in t))throw new Error(`renderer '${n}' does not exist`);if("options"===n)continue;const s=n,r=e.renderer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n||""}}n.renderer=t}if(e.tokenizer){const t=this.defaults.tokenizer||new w(this.defaults);for(const n in e.tokenizer){if(!(n in t))throw new Error(`tokenizer '${n}' does not exist`);if(["options","rules","lexer"].includes(n))continue;const s=n,r=e.tokenizer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.tokenizer=t}if(e.hooks){const t=this.defaults.hooks||new le;for(const n in e.hooks){if(!(n in t))throw new Error(`hook '${n}' does not exist`);if("options"===n)continue;const s=n,r=e.hooks[s],i=t[s];le.passThroughHooks.has(n)?t[s]=e=>{if(this.defaults.async)return Promise.resolve(r.call(t,e)).then((e=>i.call(t,e)));const n=r.call(t,e);return i.call(t,n)}:t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.hooks=t}if(e.walkTokens){const t=this.defaults.walkTokens,s=e.walkTokens;n.walkTokens=function(e){let n=[];return n.push(s.call(this,e)),t&&(n=n.concat(t.call(this,e))),n}}this.defaults={...this.defaults,...n}})),this}setOptions(e){return this.defaults={...this.defaults,...e},this}lexer(e,t){return ne.lex(e,t??this.defaults)}parser(e,t){return ie.parse(e,t??this.defaults)}#e(e,t){return(n,s)=>{const r={...s},i={...this.defaults,...r};!0===this.defaults.async&&!1===r.async&&(i.silent||console.warn("marked(): The async option was set to true by an extension. 
The async: false option sent to parse will be ignored."),i.async=!0);const l=this.#t(!!i.silent,!!i.async);if(null==n)return l(new Error("marked(): input parameter is undefined or null"));if("string"!=typeof n)return l(new Error("marked(): input parameter is of type "+Object.prototype.toString.call(n)+", string expected"));if(i.hooks&&(i.hooks.options=i),i.async)return Promise.resolve(i.hooks?i.hooks.preprocess(n):n).then((t=>e(t,i))).then((e=>i.hooks?i.hooks.processAllTokens(e):e)).then((e=>i.walkTokens?Promise.all(this.walkTokens(e,i.walkTokens)).then((()=>e)):e)).then((e=>t(e,i))).then((e=>i.hooks?i.hooks.postprocess(e):e)).catch(l);try{i.hooks&&(n=i.hooks.preprocess(n));let s=e(n,i);i.hooks&&(s=i.hooks.processAllTokens(s)),i.walkTokens&&this.walkTokens(s,i.walkTokens);let r=t(s,i);return i.hooks&&(r=i.hooks.postprocess(r)),r}catch(e){return l(e)}}}#t(e,t){return n=>{if(n.message+="\nPlease report this to https://github.com/markedjs/marked.",e){const e="

<p>An error occurred:</p><pre>"+c(n.message+"",!0)+"</pre>
    ";return t?Promise.resolve(e):e}if(t)return Promise.reject(n);throw n}}}const ae=new oe;function ce(e,t){return ae.parse(e,t)}ce.options=ce.setOptions=function(e){return ae.setOptions(e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.getDefaults=t,ce.defaults=e.defaults,ce.use=function(...e){return ae.use(...e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.walkTokens=function(e,t){return ae.walkTokens(e,t)},ce.parseInline=ae.parseInline,ce.Parser=ie,ce.parser=ie.parse,ce.Renderer=se,ce.TextRenderer=re,ce.Lexer=ne,ce.lexer=ne.lex,ce.Tokenizer=w,ce.Hooks=le,ce.parse=ce;const he=ce.options,pe=ce.setOptions,ue=ce.use,ke=ce.walkTokens,ge=ce.parseInline,fe=ce,de=ie.parse,xe=ne.lex;e.Hooks=le,e.Lexer=ne,e.Marked=oe,e.Parser=ie,e.Renderer=se,e.TextRenderer=re,e.Tokenizer=w,e.getDefaults=t,e.lexer=xe,e.marked=ce,e.options=he,e.parse=fe,e.parseInline=ge,e.parser=de,e.setOptions=pe,e.use=ue,e.walkTokens=ke})); diff --git a/custom_nodes/comfyui-kjnodes/kjweb_async/protovis.min.js b/custom_nodes/comfyui-kjnodes/kjweb_async/protovis.min.js new file mode 100644 index 0000000000000000000000000000000000000000..dfb84166521a49e4f7e41539933b101e126bd72f --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/kjweb_async/protovis.min.js @@ -0,0 +1,277 @@ +var a;if(!Array.prototype.map)Array.prototype.map=function(b,c){for(var d=this.length,f=new Array(d),g=0;g>>0,f=0;f=d)throw new Error("reduce: no values, no initial value");}for(;f=0&&d=69&&m<100?1900:0)});return"([0-9]+)";case "%Y":q.push(function(m){g=m});return"([0-9]+)";case "%%":q.push(function(){}); +return"%"}return p});(f=f.match(n))&&f.forEach(function(p,m){q[m](p)});return new Date(g,h,i,j,k,l)};return c}; +pv.Format.time=function(b){function c(f){f=Number(f);switch(b){case "short":if(f>=31536E6)return(f/31536E6).toFixed(1)+" years";else if(f>=6048E5)return(f/6048E5).toFixed(1)+" weeks";else if(f>=864E5)return(f/864E5).toFixed(1)+" days";else if(f>=36E5)return(f/36E5).toFixed(1)+" hours";else if(f>=6E4)return(f/6E4).toFixed(1)+" minutes";return(f/1E3).toFixed(1)+" seconds";case "long":var g=[],h=f%36E5/6E4>>0;g.push(d("0",2,f%6E4/1E3>>0));if(f>=36E5){var i=f%864E5/36E5>>0;g.push(d("0",2,h));if(f>=864E5){g.push(d("0", +2,i));g.push(Math.floor(f/864E5).toFixed())}else g.push(i.toFixed())}else g.push(h.toFixed());return g.reverse().join(":")}}var d=pv.Format.pad;c.format=c;c.parse=function(f){switch(b){case "short":for(var g=/([0-9,.]+)\s*([a-z]+)/g,h,i=0;h=g.exec(f);){var j=parseFloat(h[0].replace(",","")),k=0;switch(h[2].toLowerCase()){case "year":case "years":k=31536E6;break;case "week":case "weeks":k=6048E5;break;case "day":case "days":k=864E5;break;case "hour":case "hours":k=36E5;break;case "minute":case "minutes":k= +6E4;break;case "second":case "seconds":k=1E3;break}i+=j*k}return i;case "long":h=f.replace(",","").split(":").reverse();i=0;if(h.length)i+=parseFloat(h[0])*1E3;if(h.length>1)i+=parseFloat(h[1])*6E4;if(h.length>2)i+=parseFloat(h[2])*36E5;if(h.length>3)i+=parseFloat(h[3])*864E5;return i}};return c}; +pv.Format.number=function(){function b(r){if(Infinity>h)r=Math.round(r*i)/i;var s=String(Math.abs(r)).split("."),t=s[0];if(t.length>d)t=t.substring(t.length-d);if(l&&t.length3)t=t.replace(/\B(?=(?:\d{3})+(?!\d))/g,n);if(!l&&t.lengthd)s=s.substring(s.length-d);r=r[1]?Number("0."+r[1]):0;if(Infinity>h)r=Math.round(r*i)/i;return Math.round(s)+r};b.integerDigits=function(r,s){if(arguments.length){c=Number(r);d=arguments.length>1?Number(s):c;f=c+Math.floor(c/3)*n.length;return this}return[c,d]};b.fractionDigits=function(r,s){if(arguments.length){g= 
+Number(r);h=arguments.length>1?Number(s):g;i=Math.pow(10,h);return this}return[g,h]};b.integerPad=function(r){if(arguments.length){j=String(r);l=/\d/.test(j);return this}return j};b.fractionPad=function(r){if(arguments.length){k=String(r);return this}return k};b.decimal=function(r){if(arguments.length){q=String(r);return this}return q};b.group=function(r){if(arguments.length){n=r?String(r):"";f=c+Math.floor(c/3)*n.length;return this}return n};b.negativeAffix=function(r,s){if(arguments.length){p=String(r|| +"");m=String(s||"");return this}return[p,m]};return b};pv.map=function(b,c){var d={};return c?b.map(function(f,g){d.index=g;return c.call(d,f)}):b.slice()};pv.repeat=function(b,c){if(arguments.length==1)c=2;return pv.blend(pv.range(c).map(function(){return b}))};pv.cross=function(b,c){for(var d=[],f=0,g=b.length,h=c.length;fc){b.length=d;for(var f=c;fc?1:0}; +pv.reverseOrder=function(b,c){return cb?1:0};pv.search=function(b,c,d){if(!d)d=pv.identity;for(var f=0,g=b.length-1;f<=g;){var h=f+g>>1,i=d(b[h]);if(ic)g=h-1;else return h}return-f-1};pv.search.index=function(b,c,d){b=pv.search(b,c,d);return b<0?-b-1:b}; +pv.range=function(b,c,d){if(arguments.length==1){c=b;b=0}if(d==undefined)d=1;if((c-b)/d==Infinity)throw new Error("range must be finite");var f=[],g=0,h;c-=(c-b)*1.0E-10;if(d<0)for(;(h=b+d*g++)>c;)f.push(h);else for(;(h=b+d*g++)f){f=i;d=h}}return d}; +pv.min=function(b,c){if(c==pv.index)return 0;return Math.min.apply(null,c?pv.map(b,c):b)};pv.min.index=function(b,c){if(!b.length)return-1;if(c==pv.index)return 0;if(!c)c=pv.identity;for(var d=0,f=Infinity,g={},h=0;h0?Math.pow(c,Math.floor(pv.log(b,c))):-Math.pow(c,-Math.floor(-pv.log(-b,c)))};pv.logCeil=function(b,c){return b>0?Math.pow(c,Math.ceil(pv.log(b,c))):-Math.pow(c,-Math.ceil(-pv.log(-b,c)))}; +(function(){var b=Math.PI/180,c=180/Math.PI;pv.radians=function(d){return b*d};pv.degrees=function(d){return c*d}})();pv.keys=function(b){var c=[];for(var d in b)c.push(d);return c};pv.entries=function(b){var c=[];for(var d in b)c.push({key:d,value:b[d]});return c};pv.values=function(b){var c=[];for(var d in b)c.push(b[d]);return c};pv.dict=function(b,c){for(var d={},f={},g=0;g=94608E6){p=31536E6;u="%Y";o=function(w){w.setFullYear(w.getFullYear()+v)}}else if(t>=7776E6){p=2592E6;u="%m/%Y";o=function(w){w.setMonth(w.getMonth()+v)}}else if(t>=18144E5){p=6048E5;u="%m/%d";o=function(w){w.setDate(w.getDate()+7*v)}}else if(t>=2592E5){p=864E5;u="%m/%d";o=function(w){w.setDate(w.getDate()+v)}}else if(t>=108E5){p=36E5;u="%I:%M %p";o=function(w){w.setHours(w.getHours()+ +v)}}else if(t>=18E4){p=6E4;u="%I:%M %p";o=function(w){w.setMinutes(w.getMinutes()+v)}}else if(t>=3E3){p=1E3;u="%I:%M:%S";o=function(w){w.setSeconds(w.getSeconds()+v)}}else{p=1;u="%S.%Qs";o=function(w){w.setTime(w.getTime()+v)}}q=pv.Format.date(u);s=new Date(s);u=[];x(s,p);t=t/p;if(t>10)switch(p){case 36E5:v=t>20?6:3;s.setHours(Math.floor(s.getHours()/v)*v);break;case 2592E6:v=3;s.setMonth(Math.floor(s.getMonth()/v)*v);break;case 6E4:v=t>30?15:t>15?10:5;s.setMinutes(Math.floor(s.getMinutes()/v)*v); +break;case 1E3:v=t>90?15:t>60?10:5;s.setSeconds(Math.floor(s.getSeconds()/v)*v);break;case 1:v=t>1E3?250:t>200?100:t>100?50:t>50?25:5;s.setMilliseconds(Math.floor(s.getMilliseconds()/v)*v);break;default:v=pv.logCeil(t/15,10);if(t/v<2)v/=5;else if(t/v<5)v/=2;s.setFullYear(Math.floor(s.getFullYear()/v)*v);break}for(;;){o(s);if(s>m)break;u.push(new Date(s))}return r?u.reverse():u}arguments.length||(n=10);v=pv.logFloor(t/n,10);p=n/(t/v);if(p<=0.15)v*=10;else if(p<=0.35)v*=5;else 
if(p<=0.75)v*=2;p=Math.ceil(s/ +v)*v;m=Math.floor(m/v)*v;q=pv.Format.number().fractionDigits(Math.max(0,-Math.floor(pv.log(v,10)+0.01)));m=pv.range(p,m+v,v);return r?m.reverse():m};c.tickFormat=function(n){return q(n)};c.nice=function(){if(d.length!=2)return this;var n=d[0],p=d[d.length-1],m=p0;i--)l.push(-g(-j)*i);else{for(;jh[1];k--);return l.slice(j,k)};b.tickFormat=function(h){return h.toPrecision(1)}; +b.nice=function(){var h=b.domain();return b.domain(pv.logFloor(h[0],c),pv.logCeil(h[1],c))};b.base=function(h){if(arguments.length){c=Number(h);d=Math.log(c);b.transform(f,g);return this}return c};b.domain.apply(b,arguments);return b.base(10)};pv.Scale.root=function(){var b=pv.Scale.quantitative();b.power=function(c){if(arguments.length){var d=Number(c),f=1/d;b.transform(function(g){return Math.pow(g,f)},function(g){return Math.pow(g,d)});return this}return d};b.domain.apply(b,arguments);return b.power(2)}; +pv.Scale.ordinal=function(){function b(g){g in d||(d[g]=c.push(g)-1);return f[d[g]%f.length]}var c=[],d={},f=[];b.domain=function(g,h){if(arguments.length){g=g instanceof Array?arguments.length>1?pv.map(g,h):g:Array.prototype.slice.call(arguments);c=[];for(var i={},j=0;j1?pv.map(g,h):g:Array.prototype.slice.call(arguments); +if(typeof f[0]=="string")f=f.map(pv.color);return this}return f};b.split=function(g,h){var i=(h-g)/this.domain().length;f=pv.range(g+i/2,h,i);return this};b.splitFlush=function(g,h){var i=this.domain().length,j=(h-g)/(i-1);f=i==1?[(g+h)/2]:pv.range(g,h+j/2,j);return this};b.splitBanded=function(g,h,i){if(arguments.length<3)i=1;if(i<0){var j=this.domain().length;j=(h-g- -i*j)/(j+1);f=pv.range(g+j,h,j-i);f.band=-i}else{j=(h-g)/(this.domain().length+(1-i));f=pv.range(g+j*(1-i),h,j);f.band=j*i}return this}; +b.by=function(g){function h(){return b(g.apply(this,arguments))}for(var i in b)h[i]=b[i];return h};b.domain.apply(b,arguments);return b}; +pv.Scale.quantile=function(){function b(i){return h(Math.max(0,Math.min(d,pv.search.index(f,i)-1))/d)}var c=-1,d=-1,f=[],g=[],h=pv.Scale.linear();b.quantiles=function(i){if(arguments.length){c=Number(i);if(c<0){f=[g[0]].concat(g);d=g.length-1}else{f=[];f[0]=g[0];for(var j=1;j<=c;j++)f[j]=g[~~(j*(g.length-1)/c)];d=c-1}return this}return f};b.domain=function(i,j){if(arguments.length){g=i instanceof Array?pv.map(i,j):Array.prototype.slice.call(arguments);g.sort(pv.naturalOrder);b.quantiles(c);return this}return g}; +b.range=function(){if(arguments.length){h.range.apply(h,arguments);return this}return h.range()};b.by=function(i){function j(){return b(i.apply(this,arguments))}for(var k in b)j[k]=b[k];return j};b.domain.apply(b,arguments);return b}; +pv.histogram=function(b,c){var d=true;return{bins:function(f){var g=pv.map(b,c),h=[];arguments.length||(f=pv.Scale.linear(g).ticks());for(var i=0;i360)j-=360;else if(j<0)j+=360;if(j<60)return i+(h-i)*j/60;if(j<180)return h;if(j<240)return i+(h-i)*(240-j)/60;return i}function c(j){return Math.round(b(j)*255)}var d=this.h,f=this.s,g=this.l;d%=360;if(d<0)d+=360;f=Math.max(0,Math.min(f,1));g=Math.max(0,Math.min(g,1));var h=g<=0.5?g*(1+f):g+f-g*f,i=2*g-h;return pv.rgb(c(d+120),c(d),c(d-120),this.a)}; 
+pv.Color.names={aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyan:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400", +darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc", +ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a", +lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1", +moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57", +seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",steelblue:"#4682b4",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",tomato:"#ff6347",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32",transparent:pv.Color.transparent=pv.rgb(0,0,0,0)};(function(){var b=pv.Color.names;for(var c in b)b[c]=pv.color(b[c])})(); +pv.colors=function(){var b=pv.Scale.ordinal();b.range.apply(b,arguments);return b};pv.Colors={};pv.Colors.category10=function(){var b=pv.colors("#1f77b4","#ff7f0e","#2ca02c","#d62728","#9467bd","#8c564b","#e377c2","#7f7f7f","#bcbd22","#17becf");b.domain.apply(b,arguments);return b}; +pv.Colors.category20=function(){var 
b=pv.colors("#1f77b4","#aec7e8","#ff7f0e","#ffbb78","#2ca02c","#98df8a","#d62728","#ff9896","#9467bd","#c5b0d5","#8c564b","#c49c94","#e377c2","#f7b6d2","#7f7f7f","#c7c7c7","#bcbd22","#dbdb8d","#17becf","#9edae5");b.domain.apply(b,arguments);return b}; +pv.Colors.category19=function(){var b=pv.colors("#9c9ede","#7375b5","#4a5584","#cedb9c","#b5cf6b","#8ca252","#637939","#e7cb94","#e7ba52","#bd9e39","#8c6d31","#e7969c","#d6616b","#ad494a","#843c39","#de9ed6","#ce6dbd","#a55194","#7b4173");b.domain.apply(b,arguments);return b};pv.ramp=function(){var b=pv.Scale.linear();b.range.apply(b,arguments);return b}; +pv.Scene=pv.SvgScene={svg:"http://www.w3.org/2000/svg",xmlns:"http://www.w3.org/2000/xmlns",xlink:"http://www.w3.org/1999/xlink",xhtml:"http://www.w3.org/1999/xhtml",scale:1,events:["DOMMouseScroll","mousewheel","mousedown","mouseup","mouseover","mouseout","mousemove","click","dblclick"],implicit:{svg:{"shape-rendering":"auto","pointer-events":"painted",x:0,y:0,dy:0,"text-anchor":"start",transform:"translate(0,0)",fill:"none","fill-opacity":1,stroke:"none","stroke-opacity":1,"stroke-width":1.5,"stroke-linejoin":"miter"}, +css:{font:"10px sans-serif"}}};pv.SvgScene.updateAll=function(b){if(b.length&&b[0].reverse&&b.type!="line"&&b.type!="area"){for(var c=pv.extend(b),d=0,f=b.length-1;f>=0;d++,f--)c[d]=b[f];b=c}this.removeSiblings(this[b.type](b))};pv.SvgScene.create=function(b){return document.createElementNS(this.svg,b)}; +pv.SvgScene.expect=function(b,c,d,f){if(b){if(b.tagName=="a")b=b.firstChild;if(b.tagName!=c){c=this.create(c);b.parentNode.replaceChild(c,b);b=c}}else b=this.create(c);for(var g in d){c=d[g];if(c==this.implicit.svg[g])c=null;c==null?b.removeAttribute(g):b.setAttribute(g,c)}for(g in f){c=f[g];if(c==this.implicit.css[g])c=null;if(c==null)b.style.removeProperty(g);else b.style[g]=c}return b}; +pv.SvgScene.append=function(b,c,d){b.$scene={scenes:c,index:d};b=this.title(b,c[d]);b.parentNode||c.$g.appendChild(b);return b.nextSibling};pv.SvgScene.title=function(b,c){var d=b.parentNode;if(d&&d.tagName!="a")d=null;if(c.title){if(!d){d=this.create("a");b.parentNode&&b.parentNode.replaceChild(d,b);d.appendChild(b)}d.setAttributeNS(this.xlink,"title",c.title);return d}d&&d.parentNode.replaceChild(b,d);return b}; +pv.SvgScene.dispatch=pv.listener(function(b){var c=b.target.$scene;if(c){var d=b.type;switch(d){case "DOMMouseScroll":d="mousewheel";b.wheel=-480*b.detail;break;case "mousewheel":b.wheel=(window.opera?12:1)*b.wheelDelta;break}pv.Mark.dispatch(d,c.scenes,c.index)&&b.preventDefault()}});pv.SvgScene.removeSiblings=function(b){for(;b;){var c=b.nextSibling;b.parentNode.removeChild(b);b=c}};pv.SvgScene.undefined=function(){}; +pv.SvgScene.pathBasis=function(){function b(f,g,h,i,j){return{x:f[0]*g.left+f[1]*h.left+f[2]*i.left+f[3]*j.left,y:f[0]*g.top+f[1]*h.top+f[2]*i.top+f[3]*j.top}}var c=[[1/6,2/3,1/6,0],[0,2/3,1/3,0],[0,1/3,2/3,0],[0,1/6,2/3,1/6]],d=function(f,g,h,i){var j=b(c[1],f,g,h,i),k=b(c[2],f,g,h,i);f=b(c[3],f,g,h,i);return"C"+j.x+","+j.y+","+k.x+","+k.y+","+f.x+","+f.y};d.segment=function(f,g,h,i){var j=b(c[0],f,g,h,i),k=b(c[1],f,g,h,i),l=b(c[2],f,g,h,i);f=b(c[3],f,g,h,i);return"M"+j.x+","+j.y+"C"+k.x+","+k.y+ +","+l.x+","+l.y+","+f.x+","+f.y};return d}();pv.SvgScene.curveBasis=function(b){if(b.length<=2)return"";var c="",d=b[0],f=d,g=d,h=b[1];c+=this.pathBasis(d,f,g,h);for(var i=2;i1){j=c[1];h=b[k];k++;f+="C"+(g.left+i.x)+","+(g.top+i.y)+","+(h.left-j.x)+","+(h.top-j.y)+","+h.left+","+h.top;for(g=2;g9){k=3/Math.sqrt(k);f[h]= 
+k*i*d[h];f[h+1]=k*j*d[h]}}for(h=0;h2&&(g.interpolate=="basis"||g.interpolate=="cardinal"||g.interpolate=="monotone")?d:c)(l,q-1));l=q-1}}if(!j.length)return f;f=this.expect(f,"path",{"shape-rendering":g.antialias?null:"crispEdges","pointer-events":g.events,cursor:g.cursor,d:"M"+j.join("ZM")+"Z",fill:h.color,"fill-opacity":h.opacity|| +null,stroke:i.color,"stroke-opacity":i.opacity||null,"stroke-width":i.opacity?g.lineWidth/this.scale:null});return this.append(f,b,0)}; +pv.SvgScene.areaSegment=function(b){var c=b.$g.firstChild,d=b[0],f,g;if(d.interpolate=="basis"||d.interpolate=="cardinal"||d.interpolate=="monotone"){f=[];g=[];for(var h=0,i=b.length;h2&&(d.interpolate=="basis"||d.interpolate=="cardinal"||d.interpolate=="monotone"))switch(d.interpolate){case "basis":h+=this.curveBasis(b);break;case "cardinal":h+=this.curveCardinal(b,d.tension);break;case "monotone":h+=this.curveMonotone(b); +break}else for(var i=1;i1)break;return"A"+f+","+f+" 0 0,"+d+" "+c.left+","+c.top;case "step-before":return"V"+c.top+"H"+c.left;case "step-after":return"H"+c.left+"V"+c.top}return"L"+c.left+","+c.top};pv.SvgScene.lineIntersect=function(b,c,d,f){return b.plus(c.times(d.minus(b).dot(f.perp())/c.dot(f.perp())))}; +pv.SvgScene.pathJoin=function(b,c,d,f){var g=pv.vector(c.left,c.top);d=pv.vector(d.left,d.top);var h=d.minus(g),i=h.perp().norm(),j=i.times(c.lineWidth/(2*this.scale));c=g.plus(j);var k=d.plus(j),l=d.minus(j);j=g.minus(j);if(b&&b.visible){b=g.minus(b.left,b.top).perp().norm().plus(i);j=this.lineIntersect(g,b,j,h);c=this.lineIntersect(g,b,c,h)}if(f&&f.visible){f=pv.vector(f.left,f.top).minus(d).perp().norm().plus(i);l=this.lineIntersect(d,f,l,h);k=this.lineIntersect(d,f,k,h)}return"M"+c.x+","+c.y+ +"L"+k.x+","+k.y+" "+l.x+","+l.y+" "+j.x+","+j.y}; +pv.SvgScene.panel=function(b){for(var c=b.$g,d=c&&c.firstChild,f=0;f=2*Math.PI)i=i?"M0,"+j+"A"+j+","+j+" 0 1,1 0,"+-j+"A"+j+","+j+" 0 1,1 0,"+j+"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":"M0,"+j+"A"+j+","+j+" 0 1,1 0,"+-j+"A"+j+","+j+" 0 1,1 0,"+j+"Z";else{var l=Math.min(f.startAngle,f.endAngle),q=Math.max(f.startAngle,f.endAngle), +n=Math.cos(l),p=Math.cos(q);l=Math.sin(l);q=Math.sin(q);i=i?"M"+j*n+","+j*l+"A"+j+","+j+" 0 "+(k1?c:null)}; +a.anchor=function(b){b||(b="center");return(new pv.Anchor(this)).name(b).data(function(){return this.scene.target.map(function(c){return c.data})}).visible(function(){return this.scene.target[this.index].visible}).left(function(){var c=this.scene.target[this.index],d=c.width||0;switch(this.name()){case "bottom":case "top":case "center":return c.left+d/2;case "left":return null}return c.left+d}).top(function(){var c=this.scene.target[this.index],d=c.height||0;switch(this.name()){case "left":case "right":case "center":return c.top+ +d/2;case "top":return null}return c.top+d}).right(function(){var c=this.scene.target[this.index];return this.name()=="left"?c.right+(c.width||0):null}).bottom(function(){var c=this.scene.target[this.index];return this.name()=="top"?c.bottom+(c.height||0):null}).textAlign(function(){switch(this.name()){case "bottom":case "top":case "center":return"center";case "right":return"right"}return"left"}).textBaseline(function(){switch(this.name()){case "right":case "left":case "center":return"middle";case "top":return"top"}return"bottom"})}; +a.anchorTarget=function(){return this.target};a.margin=function(b){return this.left(b).right(b).top(b).bottom(b)};a.instance=function(b){var 
c=this.scene||this.parent.instance(-1).children[this.childIndex],d=!arguments.length||this.hasOwnProperty("index")?this.index:b;return c[d<0?c.length-1:d]}; +a.instances=function(b){for(var c=this,d=[],f;!(f=c.scene);){b=b.parent;d.push({index:b.index,childIndex:c.childIndex});c=c.parent}for(;d.length;){b=d.pop();f=f[b.index].children[b.childIndex]}if(this.hasOwnProperty("index")){d=pv.extend(f[this.index]);d.right=d.top=d.left=d.bottom=0;return[d]}return f};a.first=function(){return this.scene[0]};a.last=function(){return this.scene[this.scene.length-1]};a.sibling=function(){return this.index==0?null:this.scene[this.index-1]}; +a.cousin=function(){var b=this.parent;return(b=b&&b.sibling())&&b.children?b.children[this.childIndex][this.index]:null}; +a.render=function(){function b(i,j,k){i.scale=k;if(j=0;l--){var q=k[l];if(!(q.name in c)){c[q.name]=q;switch(q.name){case "data":f=q;break;case "visible":g=q;break;default:d[q.type].push(q);break}}}while(j=j.proto)}var c={},d=[[],[],[],[]],f,g;b(this);b(this.defaults);d[1].reverse();d[3].reverse();var h=this;do for(var i in h.properties)i in c||d[2].push(c[i]={name:i,type:2,value:null});while(h=h.proto);h=d[0].concat(d[1]);for(i=0;ih.id)d[g.name]={id:0,value:g.type&1?g.value.apply(this,c):g.value}}}d=this.binds.data;d=d.type& +1?d.value.apply(this,c):d.value;c.unshift(null);b.length=d.length;for(f=0;f0;l--){p=m[l];p.scale=q;q*=p.scene[p.index].transform.k}if(n.children){l=0;for(m=n.children.length;l=3*Math.PI/2};pv.Wedge.prototype.buildImplied=function(b){if(b.angle==null)b.angle=b.endAngle-b.startAngle;else if(b.endAngle==null)b.endAngle=b.startAngle+b.angle;pv.Mark.prototype.buildImplied.call(this,b)};pv.simulation=function(b){return new pv.Simulation(b)};pv.Simulation=function(b){for(var c=0;c=s,u=q.y>=t;l.leaf=false;switch((u<<1)+x){case 0:l=l.c1||(l.c1=new pv.Quadtree.Node);break;case 1:l=l.c2||(l.c2=new pv.Quadtree.Node);break;case 2:l=l.c3||(l.c3=new pv.Quadtree.Node);break;case 3:l=l.c4||(l.c4=new pv.Quadtree.Node); +break}if(x)n=s;else m=s;if(u)p=t;else r=t;c(l,q,n,p,m,r)}var f,g=Number.POSITIVE_INFINITY,h=g,i=Number.NEGATIVE_INFINITY,j=i;for(f=b;f;f=f.next){if(f.xi)i=f.x;if(f.y>j)j=f.y}f=i-g;var k=j-h;if(f>k)j=h+f;else i=g+k;this.xMin=g;this.yMin=h;this.xMax=i;this.yMax=j;this.root=new pv.Quadtree.Node;for(f=b;f;f=f.next)c(this.root,f,g,h,i,j)};pv.Quadtree.Node=function(){this.leaf=true;this.p=this.c4=this.c3=this.c2=this.c1=null};pv.Force={}; +pv.Force.charge=function(b){function c(l){function q(m){c(m);l.cn+=m.cn;n+=m.cn*m.cx;p+=m.cn*m.cy}var n=0,p=0;l.cn=0;if(!l.leaf){l.c1&&q(l.c1);l.c2&&q(l.c2);l.c3&&q(l.c3);l.c4&&q(l.c4)}if(l.p){l.cn+=b;n+=b*l.p.x;p+=b*l.p.y}l.cx=n/l.cn;l.cy=p/l.cn}function d(l,q,n,p,m,r){var s=l.cx-q.x,t=l.cy-q.y,x=1/Math.sqrt(s*s+t*t);if(l.leaf&&l.p!=q||(m-n)*xg)x=g;l=l.cn*x*x*x;s=s*l;t=t*l;q.fx+=s;q.fy+=t}}else if(!l.leaf){var u=(n+m)*0.5,o=(p+r)*0.5;l.c1&&d(l.c1,q,n,p,u,o);l.c2&&d(l.c2,q,u,p, +m,o);l.c3&&d(l.c3,q,n,o,u,r);l.c4&&d(l.c4,q,u,o,m,r);if(!(xg)x=g;if(l.p&&l.p!=q){l=b*x*x*x;s=s*l;t=t*l;q.fx+=s;q.fy+=t}}}}var f=2,g=1/f,h=500,i=1/h,j=0.9,k={};arguments.length||(b=-40);k.constant=function(l){if(arguments.length){b=Number(l);return k}return b};k.domain=function(l,q){if(arguments.length){f=Number(l);g=1/f;h=Number(q);i=1/h;return k}return[f,h]};k.theta=function(l){if(arguments.length){j=Number(l);return k}return j};k.apply=function(l,q){c(q.root);for(l=l;l;l=l.next)d(q.root, +l,q.xMin,q.yMin,q.xMax,q.yMax)};return k};pv.Force.drag=function(b){var 
c={};arguments.length||(b=0.1);c.constant=function(d){if(arguments.length){b=d;return c}return b};c.apply=function(d){if(b)for(d=d;d;d=d.next){d.fx-=b*d.vx;d.fy-=b*d.vy}};return c}; +pv.Force.spring=function(b){var c=0.1,d=20,f,g,h={};arguments.length||(b=0.1);h.links=function(i){if(arguments.length){f=i;g=i.map(function(j){return 1/Math.sqrt(Math.max(j.sourceNode.linkDegree,j.targetNode.linkDegree))});return h}return f};h.constant=function(i){if(arguments.length){b=Number(i);return h}return b};h.damping=function(i){if(arguments.length){c=Number(i);return h}return c};h.length=function(i){if(arguments.length){d=Number(i);return h}return d};h.apply=function(){for(var i=0;ig,o=sh){l.c1&&u&&c(l.c1,q,n,p,s,t);l.c2&&o&&c(l.c2,q,s,p,m,t)}if(x){l.c3&&u&&c(l.c3,q,n,t,s,r);l.c4&&o&&c(l.c4,q,s,t,m,r)}}if(l.p&&l.p!=q){n=q.x-l.p.x;p=q.y-l.p.y;m=Math.sqrt(n*n+p*p);r=f+b(l.p);if(mm)m=p}for(var r=0;rc.max?c.max:g.x;if(d)for(g=f;g;g=g.next)g.y=g.yd.max?d.max:g.y};return b};pv.Layout=function(){pv.Panel.call(this)};pv.Layout.prototype=pv.extend(pv.Panel); +pv.Layout.prototype.property=function(b,c){if(!this.hasOwnProperty("properties"))this.properties=pv.extend(this.properties);this.properties[b]=true;this.propertyMethod(b,false,pv.Mark.cast[b]=c);return this}; +pv.Layout.Network=function(){pv.Layout.call(this);var b=this;this.$id=pv.id();(this.node=(new pv.Mark).data(function(){return b.nodes()}).strokeStyle("#1f77b4").fillStyle("#fff").left(function(c){return c.x}).top(function(c){return c.y})).parent=this;this.link=(new pv.Mark).extend(this.node).data(function(c){return[c.sourceNode,c.targetNode]}).fillStyle(null).lineWidth(function(c,d){return d.linkValue*1.5}).strokeStyle("rgba(0,0,0,.2)");this.link.add=function(c){return b.add(pv.Panel).data(function(){return b.links()}).add(c).extend(this)}; +(this.label=(new pv.Mark).extend(this.node).textMargin(7).textBaseline("middle").text(function(c){return c.nodeName||c.nodeValue}).textAngle(function(c){c=c.midAngle;return pv.Wedge.upright(c)?c:c+Math.PI}).textAlign(function(c){return pv.Wedge.upright(c.midAngle)?"left":"right"})).parent=this}; +pv.Layout.Network.prototype=pv.extend(pv.Layout).property("nodes",function(b){return b.map(function(c,d){if(typeof c!="object")c={nodeValue:c};c.index=d;return c})}).property("links",function(b){return b.map(function(c){if(isNaN(c.linkValue))c.linkValue=isNaN(c.value)?1:c.value;return c})});pv.Layout.Network.prototype.reset=function(){this.$id=pv.id();return this};pv.Layout.Network.prototype.buildProperties=function(b,c){if((b.$id||0)=this.$id)return true;b.$id=this.$id;b.nodes.forEach(function(c){c.linkDegree=0});b.links.forEach(function(c){var d=c.linkValue;(c.sourceNode||(c.sourceNode=b.nodes[c.source])).linkDegree+=d;(c.targetNode||(c.targetNode=b.nodes[c.target])).linkDegree+=d})};pv.Layout.Hierarchy=function(){pv.Layout.Network.call(this);this.link.strokeStyle("#ccc")};pv.Layout.Hierarchy.prototype=pv.extend(pv.Layout.Network); +pv.Layout.Hierarchy.prototype.buildImplied=function(b){if(!b.links)b.links=pv.Layout.Hierarchy.links.call(this);pv.Layout.Network.prototype.buildImplied.call(this,b)};pv.Layout.Hierarchy.links=function(){return this.nodes().filter(function(b){return b.parentNode}).map(function(b){return{sourceNode:b,targetNode:b.parentNode,linkValue:1}})}; +pv.Layout.Hierarchy.NodeLink={buildImplied:function(b){function c(m){return m.parentNode?m.depth*(n-q)+q:0}function d(m){return m.parentNode?(m.breadth-0.25)*2*Math.PI:0}function f(m){switch(i){case "left":return m.depth*k;case "right":return 
k-m.depth*k;case "top":return m.breadth*k;case "bottom":return k-m.breadth*k;case "radial":return k/2+c(m)*Math.cos(m.midAngle)}}function g(m){switch(i){case "left":return m.breadth*l;case "right":return l-m.breadth*l;case "top":return m.depth*l;case "bottom":return l- +m.depth*l;case "radial":return l/2+c(m)*Math.sin(m.midAngle)}}var h=b.nodes,i=b.orient,j=/^(top|bottom)$/.test(i),k=b.width,l=b.height;if(i=="radial"){var q=b.innerRadius,n=b.outerRadius;if(q==null)q=0;if(n==null)n=Math.min(k,l)/2}for(b=0;bb.dy?0:-Math.PI/2});(this.leaf=(new pv.Mark).extend(this.node).fillStyle(null).strokeStyle(null).visible(function(b){return!b.firstChild})).parent= +this;delete this.link};pv.Layout.Treemap.prototype=pv.extend(pv.Layout.Hierarchy).property("round",Boolean).property("paddingLeft",Number).property("paddingRight",Number).property("paddingTop",Number).property("paddingBottom",Number).property("mode",String).property("order",String);a=pv.Layout.Treemap.prototype;a.defaults=(new pv.Layout.Treemap).extend(pv.Layout.Hierarchy.prototype.defaults).mode("squarify").order("ascending");a.padding=function(b){return this.paddingLeft(b).paddingRight(b).paddingTop(b).paddingBottom(b)}; +a.$size=function(b){return Number(b.nodeValue)};a.size=function(b){this.$size=pv.functor(b);return this}; +a.buildImplied=function(b){function c(r,s,t,x,u,o,v){for(var w=0,y=0;wt)t=v;u+=v}u*=u;s*=s;return Math.max(s*t/u,u/(s*x))}function f(r,s){function t(A){var D=o==y,G=pv.sum(A,n),E=y?p(G/y):0;c(A,G,D,x,u,D?o:E,D?E:v);if(D){u+=E;v-=E}else{x+= +E;o-=E}y=Math.min(o,v);return D}var x=r.x+j,u=r.y+l,o=r.dx-j-k,v=r.dy-l-q;if(m!="squarify")c(r.childNodes,r.size,m=="slice"?true:m=="dice"?false:s&1,x,u,o,v);else{var w=[];s=Infinity;var y=Math.min(o,v),z=o*v/r.size;if(!(r.size<=0)){r.visitBefore(function(A){A.size*=z});for(r=r.childNodes.slice();r.length;){var C=r[r.length-1];if(C.size){w.push(C);z=d(w,y);if(z<=s){r.pop();s=z}else{w.pop();t(w);w.length=0;s=Infinity}}else r.pop()}if(t(w))for(s=0;s0){i(k(C,o,v),o,B);A+=B;D+=B}G+=C.mod;A+=y.mod;E+=w.mod;D+=z.mod;C=h(C);y=g(y)}if(C&&!h(z)){z.thread=C;z.mod+=G-D}if(y&&!g(w)){w.thread=y;w.mod+=A-E;v=o}}return v}function g(o){return o.firstChild||o.thread}function h(o){return o.lastChild||o.thread}function i(o,v,w){var y=v.number-o.number;v.change-=w/y;v.shift+=w;o.change+= +w/y;v.prelim+=w;v.mod+=w}function j(o){var v=0,w=0;for(o=o.lastChild;o;o=o.previousSibling){o.prelim+=v;o.mod+=v;w+=o.change;v+=o.shift+w}}function k(o,v,w){return o.ancestor.parentNode==v.parentNode?o.ancestor:w}function l(o,v){return(v?1:t+1)/(m=="radial"?o:1)}function q(o){return m=="radial"?o.breadth/r:0}function n(o){switch(m){case "left":return o.depth;case "right":return x-o.depth;case "top":case "bottom":return o.breadth+x/2;case "radial":return x/2+o.depth*Math.cos(q(o))}}function p(o){switch(m){case "left":case "right":return o.breadth+ +u/2;case "top":return o.depth;case "bottom":return u-o.depth;case "radial":return u/2+o.depth*Math.sin(q(o))}}if(!pv.Layout.Hierarchy.prototype.buildImplied.call(this,b)){var m=b.orient,r=b.depth,s=b.breadth,t=b.group,x=b.width,u=b.height;b=b.nodes[0];b.visitAfter(function(o,v){o.ancestor=o;o.prelim=0;o.mod=0;o.change=0;o.shift=0;o.number=o.previousSibling?o.previousSibling.number+1:0;o.depth=v});c(b);d(b,-b.prelim,0);b.visitAfter(function(o){o.breadth*=s;o.depth*=r;o.midAngle=q(o);o.x=n(o);o.y=p(o); +if(o.firstChild)o.midAngle+=Math.PI;delete o.breadth;delete o.depth;delete o.ancestor;delete o.prelim;delete o.mod;delete o.change;delete o.shift;delete o.number;delete 
o.thread})}};pv.Layout.Indent=function(){pv.Layout.Hierarchy.call(this);this.link.interpolate("step-after")};pv.Layout.Indent.prototype=pv.extend(pv.Layout.Hierarchy).property("depth",Number).property("breadth",Number);pv.Layout.Indent.prototype.defaults=(new pv.Layout.Indent).extend(pv.Layout.Hierarchy.prototype.defaults).depth(15).breadth(15); +pv.Layout.Indent.prototype.buildImplied=function(b){function c(i,j,k){i.x=g+k++*f;i.y=h+j++*d;i.midAngle=0;for(i=i.firstChild;i;i=i.nextSibling)j=c(i,j,k);return j}if(!pv.Layout.Hierarchy.prototype.buildImplied.call(this,b)){var d=b.breadth,f=b.depth,g=0,h=0;c(b.nodes[0],1,1)}};pv.Layout.Pack=function(){pv.Layout.Hierarchy.call(this);this.node.radius(function(b){return b.radius}).strokeStyle("rgb(31, 119, 180)").fillStyle("rgba(31, 119, 180, .25)");this.label.textAlign("center");delete this.link}; +pv.Layout.Pack.prototype=pv.extend(pv.Layout.Hierarchy).property("spacing",Number).property("order",String);pv.Layout.Pack.prototype.defaults=(new pv.Layout.Pack).extend(pv.Layout.Hierarchy.prototype.defaults).spacing(1).order("ascending");pv.Layout.Pack.prototype.$radius=function(){return 1};pv.Layout.Pack.prototype.size=function(b){this.$radius=typeof b=="function"?function(){return Math.sqrt(b.apply(this,arguments))}:(b=Math.sqrt(b),function(){return b});return this}; +pv.Layout.Pack.prototype.buildImplied=function(b){function c(n){var p=pv.Mark.stack;p.unshift(null);for(var m=0,r=n.length;m0.0010}var t=Infinity,x=-Infinity,u=Infinity,o=-Infinity,v,w,y,z,C;v=n[0];v.x=-v.radius;v.y=0;p(v);if(n.length>1){w=n[1];w.x=w.radius;w.y=0;p(w);if(n.length>2){y=n[2];g(v,w,y);p(y);m(v,y);v.p= +y;m(y,w);w=v.n;for(var A=3;A0){r(v,z);w=z;A--}else if(D<0){r(z,w);v=z;A--}}}}v=(t+x)/2;w=(u+o)/2;for(A=y=0;An.min){n.sim.step(); +q=true}q&&d.render()},42)}else for(k=0;kg)g=j;i.size=i.firstChild?pv.sum(i.childNodes,function(k){return k.size}):c.$size.apply(c,(f[0]=i,f))});f.shift();switch(b.order){case "ascending":d.sort(function(i,j){return i.size-j.size});break;case "descending":d.sort(function(i,j){return j.size-i.size});break}var h=1/g;d.minBreadth=0;d.breadth= +0.5;d.maxBreadth=1;d.visitBefore(function(i){for(var j=i.minBreadth,k=i.maxBreadth-j,l=i.firstChild;l;l=l.nextSibling){l.minBreadth=j;l.maxBreadth=j+=l.size/i.size*k;l.breadth=(j+l.minBreadth)/2}});d.visitAfter(function(i,j){i.minDepth=(j-1)*h;i.maxDepth=i.depth=j*h});pv.Layout.Hierarchy.NodeLink.buildImplied.call(this,b)}};pv.Layout.Partition.Fill=function(){pv.Layout.Partition.call(this);pv.Layout.Hierarchy.Fill.constructor.call(this)};pv.Layout.Partition.Fill.prototype=pv.extend(pv.Layout.Partition); +pv.Layout.Partition.Fill.prototype.buildImplied=function(b){pv.Layout.Partition.prototype.buildImplied.call(this,b)||pv.Layout.Hierarchy.Fill.buildImplied.call(this,b)};pv.Layout.Arc=function(){pv.Layout.Network.call(this);var b,c,d,f=this.buildImplied;this.buildImplied=function(g){f.call(this,g);c=g.directed;b=g.orient=="radial"?"linear":"polar";d=g.orient=="right"||g.orient=="top"};this.link.data(function(g){var h=g.sourceNode;g=g.targetNode;return d!=(c||h.breadth>1)*f:null}).bottom(function(k,l){return d=="mirror"?l&1?null:(l+1>>1)*-f:(l&1||-1)*(l+1>>1)*f}).fillStyle(function(k,l){return(l&1?h:i)((l>>1)+1)});this.band.add=function(k){return b.add(pv.Panel).extend(c).add(k).extend(this)}};pv.Layout.Horizon.prototype=pv.extend(pv.Layout).property("bands",Number).property("mode",String).property("backgroundStyle",pv.color).property("positiveStyle",pv.color).property("negativeStyle",pv.color); 
+pv.Layout.Horizon.prototype.defaults=(new pv.Layout.Horizon).extend(pv.Layout.prototype.defaults).bands(2).mode("offset").backgroundStyle("white").positiveStyle("#1f77b4").negativeStyle("#d62728"); +pv.Layout.Rollup=function(){pv.Layout.Network.call(this);var b=this,c,d,f=b.buildImplied;this.buildImplied=function(g){f.call(this,g);c=g.$rollup.nodes;d=g.$rollup.links};this.node.data(function(){return c}).size(function(g){return g.nodes.length*20});this.link.interpolate("polar").eccentricity(0.8);this.link.add=function(g){return b.add(pv.Panel).data(function(){return d}).add(g).extend(this)}};pv.Layout.Rollup.prototype=pv.extend(pv.Layout.Network).property("directed",Boolean); +pv.Layout.Rollup.prototype.x=function(b){this.$x=pv.functor(b);return this};pv.Layout.Rollup.prototype.y=function(b){this.$y=pv.functor(b);return this}; +pv.Layout.Rollup.prototype.buildImplied=function(b){function c(r){return i[r]+","+j[r]}if(!pv.Layout.Network.prototype.buildImplied.call(this,b)){var d=b.nodes,f=b.links,g=b.directed,h=d.length,i=[],j=[],k=0,l={},q={},n=pv.Mark.stack,p={parent:this};n.unshift(null);for(var m=0;mk.index?k.index+","+d.index:d.index+","+k.index;(n=q[h])||(n=q[h]={sourceNode:d,targetNode:k,linkValue:0,links:[]});n.links.push(f[m]);n.linkValue+=f[m].linkValue}b.$rollup={nodes:pv.values(l),links:pv.values(q)}}}; +pv.Layout.Matrix=function(){pv.Layout.Network.call(this);var b,c,d,f,g,h=this.buildImplied;this.buildImplied=function(i){h.call(this,i);b=i.nodes.length;c=i.width/b;d=i.height/b;f=i.$matrix.labels;g=i.$matrix.pairs};this.link.data(function(){return g}).left(function(){return c*(this.index%b)}).top(function(){return d*Math.floor(this.index/b)}).width(function(){return c}).height(function(){return d}).lineWidth(1.5).strokeStyle("#fff").fillStyle(function(i){return i.linkValue?"#555":"#eee"}).parent= +this;delete this.link.add;this.label.data(function(){return f}).left(function(){return this.index&1?c*((this.index>>1)+0.5):0}).top(function(){return this.index&1?0:d*((this.index>>1)+0.5)}).textMargin(4).textAlign(function(){return this.index&1?"left":"right"}).textAngle(function(){return this.index&1?-Math.PI/2:0});delete this.node};pv.Layout.Matrix.prototype=pv.extend(pv.Layout.Network).property("directed",Boolean);pv.Layout.Matrix.prototype.sort=function(b){this.$sort=b;return this}; +pv.Layout.Matrix.prototype.buildImplied=function(b){if(!pv.Layout.Network.prototype.buildImplied.call(this,b)){var c=b.nodes,d=b.links,f=this.$sort,g=c.length,h=pv.range(g),i=[],j=[],k={};b.$matrix={labels:i,pairs:j};f&&h.sort(function(m,r){return f(c[m],c[r])});for(var l=0;lk)l=null;if(g){if(l&&g.scene==l.scene&&g.index==l.index)return;pv.Mark.dispatch("unpoint",g.scene,g.index)}if(g=l){pv.Mark.dispatch("point",l.scene,l.index);pv.listen(this.root.canvas(),"mouseout",f)}}function f(l){if(g&&!pv.ancestor(this,l.relatedTarget)){pv.Mark.dispatch("unpoint",g.scene,g.index);g=null}}var g,h=null,i=1,j=1,k=arguments.length?b*b:900;d.collapse=function(l){if(arguments.length){h=String(l);switch(h){case "y":i= +1;j=0;break;case "x":i=0;j=1;break;default:j=i=1;break}return d}return h};return d}; +pv.Behavior.select=function(){function b(j){g=this.index;f=this.scene;i=this.mouse();h=j;h.x=i.x;h.y=i.y;h.dx=h.dy=0;pv.Mark.dispatch("selectstart",f,g)}function c(){if(f){f.mark.context(f,g,function(){var 
j=this.mouse();h.x=Math.max(0,Math.min(i.x,j.x));h.y=Math.max(0,Math.min(i.y,j.y));h.dx=Math.min(this.width(),Math.max(j.x,i.x))-h.x;h.dy=Math.min(this.height(),Math.max(j.y,i.y))-h.y;this.render()});pv.Mark.dispatch("select",f,g)}}function d(){if(f){pv.Mark.dispatch("selectend",f,g);f=null}}var f, +g,h,i;pv.listen(window,"mousemove",c);pv.listen(window,"mouseup",d);return b}; +pv.Behavior.resize=function(b){function c(k){h=this.index;g=this.scene;j=this.mouse();i=k;switch(b){case "left":j.x=i.x+i.dx;break;case "right":j.x=i.x;break;case "top":j.y=i.y+i.dy;break;case "bottom":j.y=i.y;break}pv.Mark.dispatch("resizestart",g,h)}function d(){if(g){g.mark.context(g,h,function(){var k=this.mouse();i.x=Math.max(0,Math.min(j.x,k.x));i.y=Math.max(0,Math.min(j.y,k.y));i.dx=Math.min(this.parent.width(),Math.max(k.x,j.x))-i.x;i.dy=Math.min(this.parent.height(),Math.max(k.y,j.y))-i.y; +this.render()});pv.Mark.dispatch("resize",g,h)}}function f(){if(g){pv.Mark.dispatch("resizeend",g,h);g=null}}var g,h,i,j;pv.listen(window,"mousemove",d);pv.listen(window,"mouseup",f);return c}; +pv.Behavior.pan=function(){function b(){g=this.index;f=this.scene;i=pv.vector(pv.event.pageX,pv.event.pageY);h=this.transform();j=1/(h.k*this.scale);if(k)k={x:(1-h.k)*this.width(),y:(1-h.k)*this.height()}}function c(){if(f){f.mark.context(f,g,function(){var l=h.translate((pv.event.pageX-i.x)*j,(pv.event.pageY-i.y)*j);if(k){l.x=Math.max(k.x,Math.min(0,l.x));l.y=Math.max(k.y,Math.min(0,l.y))}this.transform(l).render()});pv.Mark.dispatch("pan",f,g)}}function d(){f=null}var f,g,h,i,j,k;b.bound=function(l){if(arguments.length){k= +Boolean(l);return this}return Boolean(k)};pv.listen(window,"mousemove",c);pv.listen(window,"mouseup",d);return b}; +pv.Behavior.zoom=function(b){function c(){var f=this.mouse(),g=pv.event.wheel*b;f=this.transform().translate(f.x,f.y).scale(g<0?1E3/(1E3-g):(1E3+g)/1E3).translate(-f.x,-f.y);if(d){f.k=Math.max(1,f.k);f.x=Math.max((1-f.k)*this.width(),Math.min(0,f.x));f.y=Math.max((1-f.k)*this.height(),Math.min(0,f.y))}this.transform(f).render();pv.Mark.dispatch("zoom",this.scene,this.index)}var d;arguments.length||(b=1/48);c.bound=function(f){if(arguments.length){d=Boolean(f);return this}return Boolean(d)};return c}; +pv.Geo=function(){}; +pv.Geo.projections={mercator:{project:function(b){return{x:b.lng/180,y:b.lat>85?1:b.lat<-85?-1:Math.log(Math.tan(Math.PI/4+pv.radians(b.lat)/2))/Math.PI}},invert:function(b){return{lng:b.x*180,lat:pv.degrees(2*Math.atan(Math.exp(b.y*Math.PI))-Math.PI/2)}}},"gall-peters":{project:function(b){return{x:b.lng/180,y:Math.sin(pv.radians(b.lat))}},invert:function(b){return{lng:b.x*180,lat:pv.degrees(Math.asin(b.y))}}},sinusoidal:{project:function(b){return{x:pv.radians(b.lng)*Math.cos(pv.radians(b.lat))/Math.PI, +y:b.lat/90}},invert:function(b){return{lng:pv.degrees(b.x*Math.PI/Math.cos(b.y*Math.PI/2)),lat:b.y*90}}},aitoff:{project:function(b){var c=pv.radians(b.lng);b=pv.radians(b.lat);var d=Math.acos(Math.cos(b)*Math.cos(c/2));return{x:2*(d?Math.cos(b)*Math.sin(c/2)*d/Math.sin(d):0)/Math.PI,y:2*(d?Math.sin(b)*d/Math.sin(d):0)/Math.PI}},invert:function(b){var c=b.y*Math.PI/2;return{lng:pv.degrees(b.x*Math.PI/2/Math.cos(c)),lat:pv.degrees(c)}}},hammer:{project:function(b){var c=pv.radians(b.lng);b=pv.radians(b.lat); +var d=Math.sqrt(1+Math.cos(b)*Math.cos(c/2));return{x:2*Math.SQRT2*Math.cos(b)*Math.sin(c/2)/d/3,y:Math.SQRT2*Math.sin(b)/d/1.5}},invert:function(b){var c=b.x*3;b=b.y*1.5;var 
d=Math.sqrt(1-c*c/16-b*b/4);return{lng:pv.degrees(2*Math.atan2(d*c,2*(2*d*d-1))),lat:pv.degrees(Math.asin(d*b))}}},identity:{project:function(b){return{x:b.lng/180,y:b.lat/90}},invert:function(b){return{lng:b.x*180,lat:b.y*90}}}}; +pv.Geo.scale=function(b){function c(m){if(!n||m.lng!=n.lng||m.lat!=n.lat){n=m;m=d(m);p={x:k(m.x),y:l(m.y)}}return p}function d(m){return j.project({lng:m.lng-q.lng,lat:m.lat})}function f(m){m=j.invert(m);m.lng+=q.lng;return m}var g={x:0,y:0},h={x:1,y:1},i=[],j=pv.Geo.projections.identity,k=pv.Scale.linear(-1,1).range(0,1),l=pv.Scale.linear(-1,1).range(1,0),q={lng:0,lat:0},n,p;c.x=function(m){return c(m).x};c.y=function(m){return c(m).y};c.ticks={lng:function(m){var r;if(i.length>1){var s=pv.Scale.linear(); +if(m==undefined)m=10;r=s.domain(i,function(t){return t.lat}).ticks(m);m=s.domain(i,function(t){return t.lng}).ticks(m)}else{r=pv.range(-80,81,10);m=pv.range(-180,181,10)}return m.map(function(t){return r.map(function(x){return{lat:x,lng:t}})})},lat:function(m){return pv.transpose(c.ticks.lng(m))}};c.invert=function(m){return f({x:k.invert(m.x),y:l.invert(m.y)})};c.domain=function(m,r){if(arguments.length){i=m instanceof Array?arguments.length>1?pv.map(m,r):m:Array.prototype.slice.call(arguments); +if(i.length>1){var s=i.map(function(x){return x.lng}),t=i.map(function(x){return x.lat});q={lng:(pv.max(s)+pv.min(s))/2,lat:(pv.max(t)+pv.min(t))/2};s=i.map(d);k.domain(s,function(x){return x.x});l.domain(s,function(x){return x.y})}else{q={lng:0,lat:0};k.domain(-1,1);l.domain(-1,1)}n=null;return this}return i};c.range=function(m,r){if(arguments.length){if(typeof m=="object"){g={x:Number(m.x),y:Number(m.y)};h={x:Number(r.x),y:Number(r.y)}}else{g={x:0,y:0};h={x:Number(m),y:Number(r)}}k.range(g.x,h.x); +l.range(h.y,g.y);n=null;return this}return[g,h]};c.projection=function(m){if(arguments.length){j=typeof m=="string"?pv.Geo.projections[m]||pv.Geo.projections.identity:m;return this.domain(i)}return m};c.by=function(m){function r(){return c(m.apply(this,arguments))}for(var s in c)r[s]=c[s];return r};arguments.length&&c.projection(b);return c}; diff --git a/custom_nodes/comfyui-kjnodes/kjweb_async/purify.min.js b/custom_nodes/comfyui-kjnodes/kjweb_async/purify.min.js new file mode 100644 index 0000000000000000000000000000000000000000..c2f5164618eebcc44b0186f594ccb8092639c670 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/kjweb_async/purify.min.js @@ -0,0 +1,3 @@ +/*! 
@license DOMPurify 3.0.11 | (c) Cure53 and other contributors | Released under the Apache license 2.0 and Mozilla Public License 2.0 | github.com/cure53/DOMPurify/blob/3.0.11/LICENSE */ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).DOMPurify=t()}(this,(function(){"use strict";const{entries:e,setPrototypeOf:t,isFrozen:n,getPrototypeOf:o,getOwnPropertyDescriptor:r}=Object;let{freeze:i,seal:a,create:l}=Object,{apply:c,construct:s}="undefined"!=typeof Reflect&&Reflect;i||(i=function(e){return e}),a||(a=function(e){return e}),c||(c=function(e,t,n){return e.apply(t,n)}),s||(s=function(e,t){return new e(...t)});const u=b(Array.prototype.forEach),m=b(Array.prototype.pop),p=b(Array.prototype.push),f=b(String.prototype.toLowerCase),d=b(String.prototype.toString),h=b(String.prototype.match),g=b(String.prototype.replace),T=b(String.prototype.indexOf),y=b(String.prototype.trim),E=b(Object.prototype.hasOwnProperty),A=b(RegExp.prototype.test),_=(N=TypeError,function(){for(var e=arguments.length,t=new Array(e),n=0;n1?n-1:0),r=1;r2&&void 0!==arguments[2]?arguments[2]:f;t&&t(e,null);let i=o.length;for(;i--;){let t=o[i];if("string"==typeof t){const e=r(t);e!==t&&(n(o)||(o[i]=e),t=e)}e[t]=!0}return e}function R(e){for(let t=0;t/gm),B=a(/\${[\w\W]*}/gm),W=a(/^data-[\-\w.\u00B7-\uFFFF]/),G=a(/^aria-[\-\w]+$/),Y=a(/^(?:(?:(?:f|ht)tps?|mailto|tel|callto|sms|cid|xmpp):|[^a-z]|[a-z+.\-]+(?:[^a-z+.\-:]|$))/i),j=a(/^(?:\w+script|data):/i),X=a(/[\u0000-\u0020\u00A0\u1680\u180E\u2000-\u2029\u205F\u3000]/g),q=a(/^html$/i),$=a(/^[a-z][.\w]*(-[.\w]+)+$/i);var K=Object.freeze({__proto__:null,MUSTACHE_EXPR:H,ERB_EXPR:z,TMPLIT_EXPR:B,DATA_ATTR:W,ARIA_ATTR:G,IS_ALLOWED_URI:Y,IS_SCRIPT_OR_DATA:j,ATTR_WHITESPACE:X,DOCTYPE_NAME:q,CUSTOM_ELEMENT:$});const V=function(){return"undefined"==typeof window?null:window},Z=function(e,t){if("object"!=typeof e||"function"!=typeof e.createPolicy)return null;let n=null;const o="data-tt-policy-suffix";t&&t.hasAttribute(o)&&(n=t.getAttribute(o));const r="dompurify"+(n?"#"+n:"");try{return e.createPolicy(r,{createHTML:e=>e,createScriptURL:e=>e})}catch(e){return console.warn("TrustedTypes policy "+r+" could not be created."),null}};var J=function t(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:V();const o=e=>t(e);if(o.version="3.0.11",o.removed=[],!n||!n.document||9!==n.document.nodeType)return o.isSupported=!1,o;let{document:r}=n;const a=r,c=a.currentScript,{DocumentFragment:s,HTMLTemplateElement:N,Node:b,Element:R,NodeFilter:H,NamedNodeMap:z=n.NamedNodeMap||n.MozNamedAttrMap,HTMLFormElement:B,DOMParser:W,trustedTypes:G}=n,j=R.prototype,X=L(j,"cloneNode"),$=L(j,"nextSibling"),J=L(j,"childNodes"),Q=L(j,"parentNode");if("function"==typeof N){const e=r.createElement("template");e.content&&e.content.ownerDocument&&(r=e.content.ownerDocument)}let ee,te="";const{implementation:ne,createNodeIterator:oe,createDocumentFragment:re,getElementsByTagName:ie}=r,{importNode:ae}=a;let le={};o.isSupported="function"==typeof e&&"function"==typeof Q&&ne&&void 0!==ne.createHTMLDocument;const{MUSTACHE_EXPR:ce,ERB_EXPR:se,TMPLIT_EXPR:ue,DATA_ATTR:me,ARIA_ATTR:pe,IS_SCRIPT_OR_DATA:fe,ATTR_WHITESPACE:de,CUSTOM_ELEMENT:he}=K;let{IS_ALLOWED_URI:ge}=K,Te=null;const ye=S({},[...D,...C,...O,...v,...M]);let Ee=null;const Ae=S({},[...I,...U,...P,...F]);let 
_e=Object.seal(l(null,{tagNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},attributeNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},allowCustomizedBuiltInElements:{writable:!0,configurable:!1,enumerable:!0,value:!1}})),Ne=null,be=null,Se=!0,Re=!0,we=!1,Le=!0,De=!1,Ce=!0,Oe=!1,xe=!1,ve=!1,ke=!1,Me=!1,Ie=!1,Ue=!0,Pe=!1;const Fe="user-content-";let He=!0,ze=!1,Be={},We=null;const Ge=S({},["annotation-xml","audio","colgroup","desc","foreignobject","head","iframe","math","mi","mn","mo","ms","mtext","noembed","noframes","noscript","plaintext","script","style","svg","template","thead","title","video","xmp"]);let Ye=null;const je=S({},["audio","video","img","source","image","track"]);let Xe=null;const qe=S({},["alt","class","for","id","label","name","pattern","placeholder","role","summary","title","value","style","xmlns"]),$e="http://www.w3.org/1998/Math/MathML",Ke="http://www.w3.org/2000/svg",Ve="http://www.w3.org/1999/xhtml";let Ze=Ve,Je=!1,Qe=null;const et=S({},[$e,Ke,Ve],d);let tt=null;const nt=["application/xhtml+xml","text/html"],ot="text/html";let rt=null,it=null;const at=r.createElement("form"),lt=function(e){return e instanceof RegExp||e instanceof Function},ct=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};if(!it||it!==e){if(e&&"object"==typeof e||(e={}),e=w(e),tt=-1===nt.indexOf(e.PARSER_MEDIA_TYPE)?ot:e.PARSER_MEDIA_TYPE,rt="application/xhtml+xml"===tt?d:f,Te=E(e,"ALLOWED_TAGS")?S({},e.ALLOWED_TAGS,rt):ye,Ee=E(e,"ALLOWED_ATTR")?S({},e.ALLOWED_ATTR,rt):Ae,Qe=E(e,"ALLOWED_NAMESPACES")?S({},e.ALLOWED_NAMESPACES,d):et,Xe=E(e,"ADD_URI_SAFE_ATTR")?S(w(qe),e.ADD_URI_SAFE_ATTR,rt):qe,Ye=E(e,"ADD_DATA_URI_TAGS")?S(w(je),e.ADD_DATA_URI_TAGS,rt):je,We=E(e,"FORBID_CONTENTS")?S({},e.FORBID_CONTENTS,rt):Ge,Ne=E(e,"FORBID_TAGS")?S({},e.FORBID_TAGS,rt):{},be=E(e,"FORBID_ATTR")?S({},e.FORBID_ATTR,rt):{},Be=!!E(e,"USE_PROFILES")&&e.USE_PROFILES,Se=!1!==e.ALLOW_ARIA_ATTR,Re=!1!==e.ALLOW_DATA_ATTR,we=e.ALLOW_UNKNOWN_PROTOCOLS||!1,Le=!1!==e.ALLOW_SELF_CLOSE_IN_ATTR,De=e.SAFE_FOR_TEMPLATES||!1,Ce=!1!==e.SAFE_FOR_XML,Oe=e.WHOLE_DOCUMENT||!1,ke=e.RETURN_DOM||!1,Me=e.RETURN_DOM_FRAGMENT||!1,Ie=e.RETURN_TRUSTED_TYPE||!1,ve=e.FORCE_BODY||!1,Ue=!1!==e.SANITIZE_DOM,Pe=e.SANITIZE_NAMED_PROPS||!1,He=!1!==e.KEEP_CONTENT,ze=e.IN_PLACE||!1,ge=e.ALLOWED_URI_REGEXP||Y,Ze=e.NAMESPACE||Ve,_e=e.CUSTOM_ELEMENT_HANDLING||{},e.CUSTOM_ELEMENT_HANDLING&<(e.CUSTOM_ELEMENT_HANDLING.tagNameCheck)&&(_e.tagNameCheck=e.CUSTOM_ELEMENT_HANDLING.tagNameCheck),e.CUSTOM_ELEMENT_HANDLING&<(e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck)&&(_e.attributeNameCheck=e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck),e.CUSTOM_ELEMENT_HANDLING&&"boolean"==typeof e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements&&(_e.allowCustomizedBuiltInElements=e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements),De&&(Re=!1),Me&&(ke=!0),Be&&(Te=S({},M),Ee=[],!0===Be.html&&(S(Te,D),S(Ee,I)),!0===Be.svg&&(S(Te,C),S(Ee,U),S(Ee,F)),!0===Be.svgFilters&&(S(Te,O),S(Ee,U),S(Ee,F)),!0===Be.mathMl&&(S(Te,v),S(Ee,P),S(Ee,F))),e.ADD_TAGS&&(Te===ye&&(Te=w(Te)),S(Te,e.ADD_TAGS,rt)),e.ADD_ATTR&&(Ee===Ae&&(Ee=w(Ee)),S(Ee,e.ADD_ATTR,rt)),e.ADD_URI_SAFE_ATTR&&S(Xe,e.ADD_URI_SAFE_ATTR,rt),e.FORBID_CONTENTS&&(We===Ge&&(We=w(We)),S(We,e.FORBID_CONTENTS,rt)),He&&(Te["#text"]=!0),Oe&&S(Te,["html","head","body"]),Te.table&&(S(Te,["tbody"]),delete Ne.tbody),e.TRUSTED_TYPES_POLICY){if("function"!=typeof e.TRUSTED_TYPES_POLICY.createHTML)throw _('TRUSTED_TYPES_POLICY configuration option must provide a "createHTML" 
hook.');if("function"!=typeof e.TRUSTED_TYPES_POLICY.createScriptURL)throw _('TRUSTED_TYPES_POLICY configuration option must provide a "createScriptURL" hook.');ee=e.TRUSTED_TYPES_POLICY,te=ee.createHTML("")}else void 0===ee&&(ee=Z(G,c)),null!==ee&&"string"==typeof te&&(te=ee.createHTML(""));i&&i(e),it=e}},st=S({},["mi","mo","mn","ms","mtext"]),ut=S({},["foreignobject","desc","title","annotation-xml"]),mt=S({},["title","style","font","a","script"]),pt=S({},[...C,...O,...x]),ft=S({},[...v,...k]),dt=function(e){let t=Q(e);t&&t.tagName||(t={namespaceURI:Ze,tagName:"template"});const n=f(e.tagName),o=f(t.tagName);return!!Qe[e.namespaceURI]&&(e.namespaceURI===Ke?t.namespaceURI===Ve?"svg"===n:t.namespaceURI===$e?"svg"===n&&("annotation-xml"===o||st[o]):Boolean(pt[n]):e.namespaceURI===$e?t.namespaceURI===Ve?"math"===n:t.namespaceURI===Ke?"math"===n&&ut[o]:Boolean(ft[n]):e.namespaceURI===Ve?!(t.namespaceURI===Ke&&!ut[o])&&(!(t.namespaceURI===$e&&!st[o])&&(!ft[n]&&(mt[n]||!pt[n]))):!("application/xhtml+xml"!==tt||!Qe[e.namespaceURI]))},ht=function(e){p(o.removed,{element:e});try{e.parentNode.removeChild(e)}catch(t){e.remove()}},gt=function(e,t){try{p(o.removed,{attribute:t.getAttributeNode(e),from:t})}catch(e){p(o.removed,{attribute:null,from:t})}if(t.removeAttribute(e),"is"===e&&!Ee[e])if(ke||Me)try{ht(t)}catch(e){}else try{t.setAttribute(e,"")}catch(e){}},Tt=function(e){let t=null,n=null;if(ve)e=""+e;else{const t=h(e,/^[\r\n\t ]+/);n=t&&t[0]}"application/xhtml+xml"===tt&&Ze===Ve&&(e=''+e+"");const o=ee?ee.createHTML(e):e;if(Ze===Ve)try{t=(new W).parseFromString(o,tt)}catch(e){}if(!t||!t.documentElement){t=ne.createDocument(Ze,"template",null);try{t.documentElement.innerHTML=Je?te:o}catch(e){}}const i=t.body||t.documentElement;return e&&n&&i.insertBefore(r.createTextNode(n),i.childNodes[0]||null),Ze===Ve?ie.call(t,Oe?"html":"body")[0]:Oe?t.documentElement:i},yt=function(e){return oe.call(e.ownerDocument||e,e,H.SHOW_ELEMENT|H.SHOW_COMMENT|H.SHOW_TEXT|H.SHOW_PROCESSING_INSTRUCTION|H.SHOW_CDATA_SECTION,null)},Et=function(e){return e instanceof B&&("string"!=typeof e.nodeName||"string"!=typeof e.textContent||"function"!=typeof e.removeChild||!(e.attributes instanceof z)||"function"!=typeof e.removeAttribute||"function"!=typeof e.setAttribute||"string"!=typeof e.namespaceURI||"function"!=typeof e.insertBefore||"function"!=typeof e.hasChildNodes)},At=function(e){return"function"==typeof b&&e instanceof b},_t=function(e,t,n){le[e]&&u(le[e],(e=>{e.call(o,t,n,it)}))},Nt=function(e){let t=null;if(_t("beforeSanitizeElements",e,null),Et(e))return ht(e),!0;const n=rt(e.nodeName);if(_t("uponSanitizeElement",e,{tagName:n,allowedTags:Te}),e.hasChildNodes()&&!At(e.firstElementChild)&&A(/<[/\w]/g,e.innerHTML)&&A(/<[/\w]/g,e.textContent))return ht(e),!0;if(7===e.nodeType)return ht(e),!0;if(Ce&&8===e.nodeType&&A(/<[/\w]/g,e.data))return ht(e),!0;if(!Te[n]||Ne[n]){if(!Ne[n]&&St(n)){if(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,n))return!1;if(_e.tagNameCheck instanceof Function&&_e.tagNameCheck(n))return!1}if(He&&!We[n]){const t=Q(e)||e.parentNode,n=J(e)||e.childNodes;if(n&&t){for(let o=n.length-1;o>=0;--o)t.insertBefore(X(n[o],!0),$(e))}}return ht(e),!0}return e instanceof R&&!dt(e)?(ht(e),!0):"noscript"!==n&&"noembed"!==n&&"noframes"!==n||!A(/<\/no(script|embed|frames)/i,e.innerHTML)?(De&&3===e.nodeType&&(t=e.textContent,u([ce,se,ue],(e=>{t=g(t,e," 
")})),e.textContent!==t&&(p(o.removed,{element:e.cloneNode()}),e.textContent=t)),_t("afterSanitizeElements",e,null),!1):(ht(e),!0)},bt=function(e,t,n){if(Ue&&("id"===t||"name"===t)&&(n in r||n in at))return!1;if(Re&&!be[t]&&A(me,t));else if(Se&&A(pe,t));else if(!Ee[t]||be[t]){if(!(St(e)&&(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,e)||_e.tagNameCheck instanceof Function&&_e.tagNameCheck(e))&&(_e.attributeNameCheck instanceof RegExp&&A(_e.attributeNameCheck,t)||_e.attributeNameCheck instanceof Function&&_e.attributeNameCheck(t))||"is"===t&&_e.allowCustomizedBuiltInElements&&(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,n)||_e.tagNameCheck instanceof Function&&_e.tagNameCheck(n))))return!1}else if(Xe[t]);else if(A(ge,g(n,de,"")));else if("src"!==t&&"xlink:href"!==t&&"href"!==t||"script"===e||0!==T(n,"data:")||!Ye[e]){if(we&&!A(fe,g(n,de,"")));else if(n)return!1}else;return!0},St=function(e){return"annotation-xml"!==e&&h(e,he)},Rt=function(e){_t("beforeSanitizeAttributes",e,null);const{attributes:t}=e;if(!t)return;const n={attrName:"",attrValue:"",keepAttr:!0,allowedAttributes:Ee};let r=t.length;for(;r--;){const i=t[r],{name:a,namespaceURI:l,value:c}=i,s=rt(a);let p="value"===a?c:y(c);if(n.attrName=s,n.attrValue=p,n.keepAttr=!0,n.forceKeepAttr=void 0,_t("uponSanitizeAttribute",e,n),p=n.attrValue,n.forceKeepAttr)continue;if(gt(a,e),!n.keepAttr)continue;if(!Le&&A(/\/>/i,p)){gt(a,e);continue}De&&u([ce,se,ue],(e=>{p=g(p,e," ")}));const f=rt(e.nodeName);if(bt(f,s,p)){if(!Pe||"id"!==s&&"name"!==s||(gt(a,e),p=Fe+p),ee&&"object"==typeof G&&"function"==typeof G.getAttributeType)if(l);else switch(G.getAttributeType(f,s)){case"TrustedHTML":p=ee.createHTML(p);break;case"TrustedScriptURL":p=ee.createScriptURL(p)}try{l?e.setAttributeNS(l,a,p):e.setAttribute(a,p),m(o.removed)}catch(e){}}}_t("afterSanitizeAttributes",e,null)},wt=function e(t){let n=null;const o=yt(t);for(_t("beforeSanitizeShadowDOM",t,null);n=o.nextNode();)_t("uponSanitizeShadowNode",n,null),Nt(n)||(n.content instanceof s&&e(n.content),Rt(n));_t("afterSanitizeShadowDOM",t,null)};return o.sanitize=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=null,r=null,i=null,l=null;if(Je=!e,Je&&(e="\x3c!--\x3e"),"string"!=typeof e&&!At(e)){if("function"!=typeof e.toString)throw _("toString is not a function");if("string"!=typeof(e=e.toString()))throw _("dirty is not a string, aborting")}if(!o.isSupported)return e;if(xe||ct(t),o.removed=[],"string"==typeof e&&(ze=!1),ze){if(e.nodeName){const t=rt(e.nodeName);if(!Te[t]||Ne[t])throw _("root node is forbidden and cannot be sanitized in-place")}}else if(e instanceof b)n=Tt("\x3c!----\x3e"),r=n.ownerDocument.importNode(e,!0),1===r.nodeType&&"BODY"===r.nodeName||"HTML"===r.nodeName?n=r:n.appendChild(r);else{if(!ke&&!De&&!Oe&&-1===e.indexOf("<"))return ee&&Ie?ee.createHTML(e):e;if(n=Tt(e),!n)return ke?null:Ie?te:""}n&&ve&&ht(n.firstChild);const c=yt(ze?e:n);for(;i=c.nextNode();)Nt(i)||(i.content instanceof s&&wt(i.content),Rt(i));if(ze)return e;if(ke){if(Me)for(l=re.call(n.ownerDocument);n.firstChild;)l.appendChild(n.firstChild);else l=n;return(Ee.shadowroot||Ee.shadowrootmode)&&(l=ae.call(a,l,!0)),l}let m=Oe?n.outerHTML:n.innerHTML;return Oe&&Te["!doctype"]&&n.ownerDocument&&n.ownerDocument.doctype&&n.ownerDocument.doctype.name&&A(q,n.ownerDocument.doctype.name)&&(m="\n"+m),De&&u([ce,se,ue],(e=>{m=g(m,e," ")})),ee&&Ie?ee.createHTML(m):m},o.setConfig=function(){let e=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{};ct(e),xe=!0},o.clearConfig=function(){it=null,xe=!1},o.isValidAttribute=function(e,t,n){it||ct({});const o=rt(e),r=rt(t);return bt(o,r,n)},o.addHook=function(e,t){"function"==typeof t&&(le[e]=le[e]||[],p(le[e],t))},o.removeHook=function(e){if(le[e])return m(le[e])},o.removeHooks=function(e){le[e]&&(le[e]=[])},o.removeAllHooks=function(){le={}},o}();return J})); +//# sourceMappingURL=purify.min.js.map diff --git a/custom_nodes/comfyui-kjnodes/kjweb_async/svg-path-properties.min.js b/custom_nodes/comfyui-kjnodes/kjweb_async/svg-path-properties.min.js new file mode 100644 index 0000000000000000000000000000000000000000..88d47e0de4c54f881083164c20045a7e8b621caf --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/kjweb_async/svg-path-properties.min.js @@ -0,0 +1,2 @@ +// http://geoexamples.com/path-properties/ v1.2.0 Copyright 2023 Roger Veciana i Rovira +!function(t,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n((t="undefined"!=typeof globalThis?globalThis:t||self).svgPathProperties={})}(this,(function(t){"use strict";function n(t,n){for(var e=0;et.length)&&(n=t.length);for(var e=0,i=new Array(n);eu.length&&(t=u.length);var n=f({x:u.x0,y:u.y0},u.rx,u.ry,u.xAxisRotate,u.LargeArcFlag,u.SweepFlag,{x:u.x1,y:u.y1},t/u.length);return{x:n.x,y:n.y}})),i(this,"getTangentAtLength",(function(t){t<0?t=0:t>u.length&&(t=u.length);var n,e=.05,i=u.getPointAtLength(t);t<0?t=0:t>u.length&&(t=u.length);var r=(n=t1&&(n=Math.sqrt(c)*n,e=Math.sqrt(c)*e);var f=(Math.pow(n,2)*Math.pow(e,2)-Math.pow(n,2)*Math.pow(l.y,2)-Math.pow(e,2)*Math.pow(l.x,2))/(Math.pow(n,2)*Math.pow(l.y,2)+Math.pow(e,2)*Math.pow(l.x,2));f=f<0?0:f;var y=(r!==h?1:-1)*Math.sqrt(f),v=y*(n*l.y/e),M=y*(-e*l.x/n),L={x:Math.cos(o)*v-Math.sin(o)*M+(t.x+s.x)/2,y:Math.sin(o)*v+Math.cos(o)*M+(t.y+s.y)/2},d={x:(l.x-v)/n,y:(l.y-M)/e},A=w({x:1,y:0},d),b=w(d,{x:(-l.x-v)/n,y:(-l.y-M)/e});!h&&b>0?b-=2*Math.PI:h&&b<0&&(b+=2*Math.PI);var P=A+(b%=2*Math.PI)*a,m=n*Math.cos(P),T=e*Math.sin(P);return{x:Math.cos(o)*m-Math.sin(o)*T+L.x,y:Math.sin(o)*m+Math.cos(o)*T+L.y,ellipticalArcStartAngle:A,ellipticalArcEndAngle:A+b,ellipticalArcAngle:P,ellipticalArcCenter:L,resultantRx:n,resultantRy:e}},y=function(t,n){t=t||500;for(var e,i=0,r=[],h=[],s=n(0),a=0;a0?Math.sqrt(l*l+c):0,y=u*u+c>0?Math.sqrt(u*u+c):0,p=u+Math.sqrt(u*u+c)!==0&&(l+f)/(u+y)!=0?c*Math.log(Math.abs((l+f)/(u+y))):0;return Math.sqrt(a)/2*(l*f-u*y+p)},_=function(t,n,e){return{x:2*(1-e)*(t[1]-t[0])+2*e*(t[2]-t[1]),y:2*(1-e)*(n[1]-n[0])+2*e*(n[2]-n[1])}};function S(t,n,e){var i=N(1,e,t),r=N(1,e,n),h=i*i+r*r;return Math.sqrt(h)}var N=function t(n,e,i){var r,h,s=i.length-1;if(0===s)return 0;if(0===n){h=0;for(var a=0;a<=s;a++)h+=A[s][a]*Math.pow(1-e,s-a)*Math.pow(e,a)*i[a];return h}r=new Array(s);for(var o=0;o.001;){var a=e(r+h),o=Math.abs(t-a)/n;if(o500)break}return r},j=e((function(t,n,e,r,h,s,a,o){var g=this;i(this,"a",void 0),i(this,"b",void 0),i(this,"c",void 0),i(this,"d",void 0),i(this,"length",void 0),i(this,"getArcLength",void 0),i(this,"getPoint",void 0),i(this,"getDerivative",void 0),i(this,"getTotalLength",(function(){return g.length})),i(this,"getPointAtLength",(function(t){var n=[g.a.x,g.b.x,g.c.x,g.d.x],e=[g.a.y,g.b.y,g.c.y,g.d.y],i=C(t,g.length,(function(t){return g.getArcLength(n,e,t)}));return g.getPoint(n,e,i)})),i(this,"getTangentAtLength",(function(t){var n=[g.a.x,g.b.x,g.c.x,g.d.x],e=[g.a.y,g.b.y,g.c.y,g.d.y],i=C(t,g.length,(function(t){return 
g.getArcLength(n,e,t)})),r=g.getDerivative(n,e,i),h=Math.sqrt(r.x*r.x+r.y*r.y);return h>0?{x:r.x/h,y:r.y/h}:{x:0,y:0}})),i(this,"getPropertiesAtLength",(function(t){var n,e=[g.a.x,g.b.x,g.c.x,g.d.x],i=[g.a.y,g.b.y,g.c.y,g.d.y],r=C(t,g.length,(function(t){return g.getArcLength(e,i,t)})),h=g.getDerivative(e,i,r),s=Math.sqrt(h.x*h.x+h.y*h.y);n=s>0?{x:h.x/s,y:h.y/s}:{x:0,y:0};var a=g.getPoint(e,i,r);return{x:a.x,y:a.y,tangentX:n.x,tangentY:n.y}})),i(this,"getC",(function(){return g.c})),i(this,"getD",(function(){return g.d})),this.a={x:t,y:n},this.b={x:e,y:r},this.c={x:h,y:s},void 0!==a&&void 0!==o?(this.getArcLength=m,this.getPoint=b,this.getDerivative=P,this.d={x:a,y:o}):(this.getArcLength=q,this.getPoint=T,this.getDerivative=_,this.d={x:0,y:0}),this.length=this.getArcLength([this.a.x,this.b.x,this.c.x,this.d.x],[this.a.y,this.b.y,this.c.y,this.d.y],1)})),O=e((function(t){var n=this;i(this,"length",0),i(this,"partial_lengths",[]),i(this,"functions",[]),i(this,"initial_point",null),i(this,"getPartAtLength",(function(t){t<0?t=0:t>n.length&&(t=n.length);for(var e=n.partial_lengths.length-1;n.partial_lengths[e]>=t&&e>0;)e--;return e++,{fraction:t-n.partial_lengths[e-1],i:e}})),i(this,"getTotalLength",(function(){return n.length})),i(this,"getPointAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getPointAtLength(e.fraction);if(n.initial_point)return n.initial_point;throw new Error("Wrong function at this part.")})),i(this,"getTangentAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getTangentAtLength(e.fraction);if(n.initial_point)return{x:0,y:0};throw new Error("Wrong function at this part.")})),i(this,"getPropertiesAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getPropertiesAtLength(e.fraction);if(n.initial_point)return{x:n.initial_point.x,y:n.initial_point.y,tangentX:0,tangentY:0};throw new Error("Wrong function at this part.")})),i(this,"getParts",(function(){for(var t=[],e=0;e0?t:"M0,0").match(o);if(!n)throw new Error("No path elements found in string ".concat(t));return n.reduce((function(t,n){var e=n.charAt(0),i=e.toLowerCase(),h=u(n.substring(1));if("m"===i&&h.length>2&&(t.push([e].concat(r(h.splice(0,2)))),i="l",e="m"===e?"l":"L"),"a"===i.toLowerCase()&&(5===h.length||6===h.length)){var s=n.substring(1).trim().split(" ");h=[Number(s[0]),Number(s[1]),Number(s[2]),Number(s[3].charAt(0)),Number(s[3].charAt(1)),Number(s[3].substring(2)),Number(s[4])]}for(;h.length>=0;){if(h.length===a[i]){t.push([e].concat(r(h.splice(0,a[i]))));break}if(h.length0?(this.length+=e.getTotalLength(),this.functions.push(e),s=[h[y][5]+s[0],h[y][6]+s[1]]):this.functions.push(new l(s[0],s[0],s[1],s[1]));else if("S"===h[y][0]){if(y>0&&["C","c","S","s"].indexOf(h[y-1][0])>-1){if(e){var p=e.getC();e=new j(s[0],s[1],2*s[0]-p.x,2*s[1]-p.y,h[y][1],h[y][2],h[y][3],h[y][4])}}else e=new j(s[0],s[1],s[0],s[1],h[y][1],h[y][2],h[y][3],h[y][4]);e&&(this.length+=e.getTotalLength(),s=[h[y][3],h[y][4]],this.functions.push(e))}else if("s"===h[y][0]){if(y>0&&["C","c","S","s"].indexOf(h[y-1][0])>-1){if(e){var x=e.getC(),v=e.getD();e=new j(s[0],s[1],s[0]+v.x-x.x,s[1]+v.y-x.y,s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4])}}else e=new j(s[0],s[1],s[0],s[1],s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4]);e&&(this.length+=e.getTotalLength(),s=[h[y][3]+s[0],h[y][4]+s[1]],this.functions.push(e))}else if("Q"===h[y][0]){if(s[0]==h[y][1]&&s[1]==h[y][2]){var M=new 
l(h[y][1],h[y][3],h[y][2],h[y][4]);this.length+=M.getTotalLength(),this.functions.push(M)}else e=new j(s[0],s[1],h[y][1],h[y][2],h[y][3],h[y][4],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);s=[h[y][3],h[y][4]],g=[h[y][1],h[y][2]]}else if("q"===h[y][0]){if(0!=h[y][1]||0!=h[y][2])e=new j(s[0],s[1],s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);else{var w=new l(s[0]+h[y][1],s[0]+h[y][3],s[1]+h[y][2],s[1]+h[y][4]);this.length+=w.getTotalLength(),this.functions.push(w)}g=[s[0]+h[y][1],s[1]+h[y][2]],s=[h[y][3]+s[0],h[y][4]+s[1]]}else if("T"===h[y][0]){if(y>0&&["Q","q","T","t"].indexOf(h[y-1][0])>-1)e=new j(s[0],s[1],2*s[0]-g[0],2*s[1]-g[1],h[y][1],h[y][2],void 0,void 0),this.functions.push(e),this.length+=e.getTotalLength();else{var L=new l(s[0],h[y][1],s[1],h[y][2]);this.functions.push(L),this.length+=L.getTotalLength()}g=[2*s[0]-g[0],2*s[1]-g[1]],s=[h[y][1],h[y][2]]}else if("t"===h[y][0]){if(y>0&&["Q","q","T","t"].indexOf(h[y-1][0])>-1)e=new j(s[0],s[1],2*s[0]-g[0],2*s[1]-g[1],s[0]+h[y][1],s[1]+h[y][2],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);else{var d=new l(s[0],s[0]+h[y][1],s[1],s[1]+h[y][2]);this.length+=d.getTotalLength(),this.functions.push(d)}g=[2*s[0]-g[0],2*s[1]-g[1]],s=[h[y][1]+s[0],h[y][2]+s[1]]}else if("A"===h[y][0]){var A=new c(s[0],s[1],h[y][1],h[y][2],h[y][3],1===h[y][4],1===h[y][5],h[y][6],h[y][7]);this.length+=A.getTotalLength(),s=[h[y][6],h[y][7]],this.functions.push(A)}else if("a"===h[y][0]){var b=new c(s[0],s[1],h[y][1],h[y][2],h[y][3],1===h[y][4],1===h[y][5],s[0]+h[y][6],s[1]+h[y][7]);this.length+=b.getTotalLength(),s=[s[0]+h[y][6],s[1]+h[y][7]],this.functions.push(b)}this.partial_lengths.push(this.length)}})),E=e((function(t){var n=this;if(i(this,"inst",void 0),i(this,"getTotalLength",(function(){return n.inst.getTotalLength()})),i(this,"getPointAtLength",(function(t){return n.inst.getPointAtLength(t)})),i(this,"getTangentAtLength",(function(t){return n.inst.getTangentAtLength(t)})),i(this,"getPropertiesAtLength",(function(t){return n.inst.getPropertiesAtLength(t)})),i(this,"getParts",(function(){return n.inst.getParts()})),this.inst=new O(t),!(this instanceof E))return new E(t)}));t.svgPathProperties=E})); diff --git a/custom_nodes/comfyui-kjnodes/nodes/__pycache__/audioscheduler_nodes.cpython-310.pyc b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/audioscheduler_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7b4e903917ce841d5e936a84e025f62491e09f3 Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/audioscheduler_nodes.cpython-310.pyc differ diff --git a/custom_nodes/comfyui-kjnodes/nodes/__pycache__/batchcrop_nodes.cpython-310.pyc b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/batchcrop_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c4b2baa8cfdebb4494541d0f283753135ac49a1 Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/batchcrop_nodes.cpython-310.pyc differ diff --git a/custom_nodes/comfyui-kjnodes/nodes/__pycache__/curve_nodes.cpython-310.pyc b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/curve_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61321f04d01376b1cb1f9fd6eafc3a35c0eac3db Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/curve_nodes.cpython-310.pyc differ diff --git 
a/custom_nodes/comfyui-kjnodes/nodes/__pycache__/image_nodes.cpython-310.pyc b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/image_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..090bec9af4cde615e9efa6ff18b1f960349e767c Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/image_nodes.cpython-310.pyc differ diff --git a/custom_nodes/comfyui-kjnodes/nodes/__pycache__/intrinsic_lora_nodes.cpython-310.pyc b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/intrinsic_lora_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d48d7028e5ebce352e9b58463d106e5b0249b25 Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/intrinsic_lora_nodes.cpython-310.pyc differ diff --git a/custom_nodes/comfyui-kjnodes/nodes/__pycache__/lora_nodes.cpython-310.pyc b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/lora_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38c43b3bc9b56c5ac2ddddf1a626f959ca9eb360 Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/lora_nodes.cpython-310.pyc differ diff --git a/custom_nodes/comfyui-kjnodes/nodes/__pycache__/mask_nodes.cpython-310.pyc b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/mask_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f9100d1238acee33d221c3cdd4d1de273a9c60a Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/mask_nodes.cpython-310.pyc differ diff --git a/custom_nodes/comfyui-kjnodes/nodes/__pycache__/model_optimization_nodes.cpython-310.pyc b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/model_optimization_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1c39ab021062662c2ccbd0f5f6e7e197346d3f0 Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/model_optimization_nodes.cpython-310.pyc differ diff --git a/custom_nodes/comfyui-kjnodes/nodes/__pycache__/nodes.cpython-310.pyc b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..527cbde686091ac4cf0a111e22366b25b71ceb37 Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/nodes/__pycache__/nodes.cpython-310.pyc differ diff --git a/custom_nodes/comfyui-kjnodes/nodes/audioscheduler_nodes.py b/custom_nodes/comfyui-kjnodes/nodes/audioscheduler_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..69d0422e7da875298f87fe60a7f6d1494530dca2 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/nodes/audioscheduler_nodes.py @@ -0,0 +1,251 @@ +# to be used with https://github.com/a1lazydog/ComfyUI-AudioScheduler +import torch +from torchvision.transforms import functional as TF +from PIL import Image, ImageDraw +import numpy as np +from ..utility.utility import pil2tensor +from nodes import MAX_RESOLUTION + +class NormalizedAmplitudeToMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_offset": ("INT", {"default": 0,"min": -255, "max": 255, "step": 1}), + "location_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "location_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "size": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + "shape": 
( + [ + 'none', + 'circle', + 'square', + 'triangle', + ], + { + "default": 'none' + }), + "color": ( + [ + 'white', + 'amplitude', + ], + { + "default": 'amplitude' + }), + },} + + CATEGORY = "KJNodes/audio" + RETURN_TYPES = ("MASK",) + FUNCTION = "convert" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Creates masks based on the normalized amplitude. +""" + + def convert(self, normalized_amp, width, height, frame_offset, shape, location_x, location_y, size, color): + # Ensure normalized_amp is an array and within the range [0, 1] + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + + # Offset the amplitude values by rolling the array + normalized_amp = np.roll(normalized_amp, frame_offset) + + # Initialize an empty list to hold the image tensors + out = [] + # Iterate over each amplitude value to create an image + for amp in normalized_amp: + # Scale the amplitude value to cover the full range of grayscale values + if color == 'amplitude': + grayscale_value = int(amp * 255) + elif color == 'white': + grayscale_value = 255 + # Convert the grayscale value to an RGB format + gray_color = (grayscale_value, grayscale_value, grayscale_value) + finalsize = size * amp + + if shape == 'none': + shapeimage = Image.new("RGB", (width, height), gray_color) + else: + shapeimage = Image.new("RGB", (width, height), "black") + + draw = ImageDraw.Draw(shapeimage) + if shape == 'circle' or shape == 'square': + # Define the bounding box for the shape + left_up_point = (location_x - finalsize, location_y - finalsize) + right_down_point = (location_x + finalsize,location_y + finalsize) + two_points = [left_up_point, right_down_point] + + if shape == 'circle': + draw.ellipse(two_points, fill=gray_color) + elif shape == 'square': + draw.rectangle(two_points, fill=gray_color) + + elif shape == 'triangle': + # Define the points for the triangle + left_up_point = (location_x - finalsize, location_y + finalsize) # bottom left + right_down_point = (location_x + finalsize, location_y + finalsize) # bottom right + top_point = (location_x, location_y) # top point + draw.polygon([top_point, left_up_point, right_down_point], fill=gray_color) + + shapeimage = pil2tensor(shapeimage) + mask = shapeimage[:, :, :, 0] + out.append(mask) + + return (torch.cat(out, dim=0),) + +class NormalizedAmplitudeToFloatList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + },} + + CATEGORY = "KJNodes/audio" + RETURN_TYPES = ("FLOAT",) + FUNCTION = "convert" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Creates a list of floats from the normalized amplitude. 
+""" + + def convert(self, normalized_amp): + # Ensure normalized_amp is an array and within the range [0, 1] + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + return (normalized_amp.tolist(),) + +class OffsetMaskByNormalizedAmplitude: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + "mask": ("MASK",), + "x": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "y": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "rotate": ("BOOLEAN", { "default": False }), + "angle_multiplier": ("FLOAT", { "default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number" }), + } + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("mask",) + FUNCTION = "offset" + CATEGORY = "KJNodes/audio" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Offsets masks based on the normalized amplitude. +""" + + def offset(self, mask, x, y, angle_multiplier, rotate, normalized_amp): + + # Ensure normalized_amp is an array and within the range [0, 1] + offsetmask = mask.clone() + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + + batch_size, height, width = mask.shape + + if rotate: + for i in range(batch_size): + rotation_amp = int(normalized_amp[i] * (360 * angle_multiplier)) + rotation_angle = rotation_amp + offsetmask[i] = TF.rotate(offsetmask[i].unsqueeze(0), rotation_angle).squeeze(0) + if x != 0 or y != 0: + for i in range(batch_size): + offset_amp = normalized_amp[i] * 10 + shift_x = min(x*offset_amp, width-1) + shift_y = min(y*offset_amp, height-1) + if shift_x != 0: + offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_x), dims=1) + if shift_y != 0: + offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_y), dims=0) + + return offsetmask, + +class ImageTransformByNormalizedAmplitude: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + "zoom_scale": ("FLOAT", { "default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number" }), + "x_offset": ("INT", { "default": 0, "min": (1 -MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "y_offset": ("INT", { "default": 0, "min": (1 -MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "cumulative": ("BOOLEAN", { "default": False }), + "image": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "amptransform" + CATEGORY = "KJNodes/audio" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Transforms image based on the normalized amplitude. 
+""" + + def amptransform(self, image, normalized_amp, zoom_scale, cumulative, x_offset, y_offset): + # Ensure normalized_amp is an array and within the range [0, 1] + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + transformed_images = [] + + # Initialize the cumulative zoom factor + prev_amp = 0.0 + + for i in range(image.shape[0]): + img = image[i] # Get the i-th image in the batch + amp = normalized_amp[i] # Get the corresponding amplitude value + + # Incrementally increase the cumulative zoom factor + if cumulative: + prev_amp += amp + amp += prev_amp + + # Convert the image tensor from BxHxWxC to CxHxW format expected by torchvision + img = img.permute(2, 0, 1) + + # Convert PyTorch tensor to PIL Image for processing + pil_img = TF.to_pil_image(img) + + # Calculate the crop size based on the amplitude + width, height = pil_img.size + crop_size = int(min(width, height) * (1 - amp * zoom_scale)) + crop_size = max(crop_size, 1) + + # Calculate the crop box coordinates (centered crop) + left = (width - crop_size) // 2 + top = (height - crop_size) // 2 + right = (width + crop_size) // 2 + bottom = (height + crop_size) // 2 + + # Crop and resize back to original size + cropped_img = TF.crop(pil_img, top, left, crop_size, crop_size) + resized_img = TF.resize(cropped_img, (height, width)) + + # Convert back to tensor in CxHxW format + tensor_img = TF.to_tensor(resized_img) + + # Convert the tensor back to BxHxWxC format + tensor_img = tensor_img.permute(1, 2, 0) + + # Offset the image based on the amplitude + offset_amp = amp * 10 # Calculate the offset magnitude based on the amplitude + shift_x = min(x_offset * offset_amp, img.shape[1] - 1) # Calculate the shift in x direction + shift_y = min(y_offset * offset_amp, img.shape[0] - 1) # Calculate the shift in y direction + + # Apply the offset to the image tensor + if shift_x != 0: + tensor_img = torch.roll(tensor_img, shifts=int(shift_x), dims=1) + if shift_y != 0: + tensor_img = torch.roll(tensor_img, shifts=int(shift_y), dims=0) + + # Add to the list + transformed_images.append(tensor_img) + + # Stack all transformed images into a batch + transformed_batch = torch.stack(transformed_images) + + return (transformed_batch,) \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/nodes/batchcrop_nodes.py b/custom_nodes/comfyui-kjnodes/nodes/batchcrop_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..3b8cd3aa39a2b14662aa323a86005a68579f4b04 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/nodes/batchcrop_nodes.py @@ -0,0 +1,763 @@ +from ..utility.utility import tensor2pil, pil2tensor +from PIL import Image, ImageDraw, ImageFilter +import numpy as np +import torch +from torchvision.transforms import Resize, CenterCrop, InterpolationMode +import math + +#based on nodes from mtb https://github.com/melMass/comfy_mtb + +def bbox_to_region(bbox, target_size=None): + bbox = bbox_check(bbox, target_size) + return (bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]) + +def bbox_check(bbox, target_size=None): + if not target_size: + return bbox + + new_bbox = ( + bbox[0], + bbox[1], + min(target_size[0] - bbox[0], bbox[2]), + min(target_size[1] - bbox[1], bbox[3]), + ) + return new_bbox + +class BatchCropFromMask: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "masks": ("MASK",), + "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}), + "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 
0.01}), + }, + } + + RETURN_TYPES = ( + "IMAGE", + "IMAGE", + "BBOX", + "INT", + "INT", + ) + RETURN_NAMES = ( + "original_images", + "cropped_images", + "bboxes", + "width", + "height", + ) + FUNCTION = "crop" + CATEGORY = "KJNodes/masking" + + def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha): + if alpha == 0: + return prev_bbox_size + return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size) + + def smooth_center(self, prev_center, curr_center, alpha=0.5): + if alpha == 0: + return prev_center + return ( + round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]), + round(alpha * curr_center[1] + (1 - alpha) * prev_center[1]) + ) + + def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha): + + bounding_boxes = [] + cropped_images = [] + + self.max_bbox_width = 0 + self.max_bbox_height = 0 + + # First, calculate the maximum bounding box size across all masks + curr_max_bbox_width = 0 + curr_max_bbox_height = 0 + for mask in masks: + _mask = tensor2pil(mask)[0] + non_zero_indices = np.nonzero(np.array(_mask)) + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + width = max_x - min_x + height = max_y - min_y + curr_max_bbox_width = max(curr_max_bbox_width, width) + curr_max_bbox_height = max(curr_max_bbox_height, height) + + # Smooth the changes in the bounding box size + self.max_bbox_width = self.smooth_bbox_size(self.max_bbox_width, curr_max_bbox_width, bbox_smooth_alpha) + self.max_bbox_height = self.smooth_bbox_size(self.max_bbox_height, curr_max_bbox_height, bbox_smooth_alpha) + + # Apply the crop size multiplier + self.max_bbox_width = round(self.max_bbox_width * crop_size_mult) + self.max_bbox_height = round(self.max_bbox_height * crop_size_mult) + bbox_aspect_ratio = self.max_bbox_width / self.max_bbox_height + + # Then, for each mask and corresponding image... 
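+        # For every frame: find the mask's bounding box and its center, smooth the
+        # center with the same exponential moving average used for the box size,
+        # then cut a max_bbox_width x max_bbox_height window around it (clamped to
+        # the image) and resize/center-crop so every frame has identical dimensions.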
+ for i, (mask, img) in enumerate(zip(masks, original_images)): + _mask = tensor2pil(mask)[0] + non_zero_indices = np.nonzero(np.array(_mask)) + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + + # Calculate center of bounding box + center_x = np.mean(non_zero_indices[1]) + center_y = np.mean(non_zero_indices[0]) + curr_center = (round(center_x), round(center_y)) + + # If this is the first frame, initialize prev_center with curr_center + if not hasattr(self, 'prev_center'): + self.prev_center = curr_center + + # Smooth the changes in the center coordinates from the second frame onwards + if i > 0: + center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha) + else: + center = curr_center + + # Update prev_center for the next frame + self.prev_center = center + + # Create bounding box using max_bbox_width and max_bbox_height + half_box_width = round(self.max_bbox_width / 2) + half_box_height = round(self.max_bbox_height / 2) + min_x = max(0, center[0] - half_box_width) + max_x = min(img.shape[1], center[0] + half_box_width) + min_y = max(0, center[1] - half_box_height) + max_y = min(img.shape[0], center[1] + half_box_height) + + # Append bounding box coordinates + bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y)) + + # Crop the image from the bounding box + cropped_img = img[min_y:max_y, min_x:max_x, :] + + # Calculate the new dimensions while maintaining the aspect ratio + new_height = min(cropped_img.shape[0], self.max_bbox_height) + new_width = round(new_height * bbox_aspect_ratio) + + # Resize the image + resize_transform = Resize((new_height, new_width)) + resized_img = resize_transform(cropped_img.permute(2, 0, 1)) + + # Perform the center crop to the desired size + crop_transform = CenterCrop((self.max_bbox_height, self.max_bbox_width)) # swap the order here if necessary + cropped_resized_img = crop_transform(resized_img) + + cropped_images.append(cropped_resized_img.permute(1, 2, 0)) + + cropped_out = torch.stack(cropped_images, dim=0) + + return (original_images, cropped_out, bounding_boxes, self.max_bbox_width, self.max_bbox_height, ) + +class BatchUncrop: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "cropped_images": ("IMAGE",), + "bboxes": ("BBOX",), + "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}, ), + "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "border_top": ("BOOLEAN", {"default": True}), + "border_bottom": ("BOOLEAN", {"default": True}), + "border_left": ("BOOLEAN", {"default": True}), + "border_right": ("BOOLEAN", {"default": True}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "uncrop" + + CATEGORY = "KJNodes/masking" + + def uncrop(self, original_images, cropped_images, bboxes, border_blending, crop_rescale, border_top, border_bottom, border_left, border_right): + def inset_border(image, border_width, border_color, border_top, border_bottom, border_left, border_right): + draw = ImageDraw.Draw(image) + width, height = image.size + if border_top: + draw.rectangle((0, 0, width, border_width), fill=border_color) + if border_bottom: + draw.rectangle((0, height - border_width, width, height), fill=border_color) + if border_left: + draw.rectangle((0, 0, border_width, height), fill=border_color) + if border_right: + draw.rectangle((width - border_width, 0, width, height), fill=border_color) + return 
image + + if len(original_images) != len(cropped_images): + raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same") + + # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images + if len(bboxes) > len(original_images): + print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}") + bboxes = bboxes[:len(original_images)] + elif len(bboxes) < len(original_images): + raise ValueError("There should be at least as many bboxes as there are original and cropped images") + + input_images = tensor2pil(original_images) + crop_imgs = tensor2pil(cropped_images) + + out_images = [] + for i in range(len(input_images)): + img = input_images[i] + crop = crop_imgs[i] + bbox = bboxes[i] + + # uncrop the image based on the bounding box + bb_x, bb_y, bb_width, bb_height = bbox + + paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) + + # scale factors + scale_x = crop_rescale + scale_y = crop_rescale + + # scaled paste_region + paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y)) + + # rescale the crop image to fit the paste_region + crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1]))) + crop_img = crop.convert("RGB") + + if border_blending > 1.0: + border_blending = 1.0 + elif border_blending < 0.0: + border_blending = 0.0 + + blend_ratio = (max(crop_img.size) / 2) * float(border_blending) + + blend = img.convert("RGBA") + mask = Image.new("L", img.size, 0) + + mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255) + mask_block = inset_border(mask_block, round(blend_ratio / 2), (0), border_top, border_bottom, border_left, border_right) + + mask.paste(mask_block, paste_region) + blend.paste(crop_img, paste_region) + + mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4)) + mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4)) + + blend.putalpha(mask) + img = Image.alpha_composite(img.convert("RGBA"), blend) + out_images.append(img.convert("RGB")) + + return (pil2tensor(out_images),) + +class BatchCropFromMaskAdvanced: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "masks": ("MASK",), + "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ( + "IMAGE", + "IMAGE", + "MASK", + "IMAGE", + "MASK", + "BBOX", + "BBOX", + "INT", + "INT", + ) + RETURN_NAMES = ( + "original_images", + "cropped_images", + "cropped_masks", + "combined_crop_image", + "combined_crop_masks", + "bboxes", + "combined_bounding_box", + "bbox_width", + "bbox_height", + ) + FUNCTION = "crop" + CATEGORY = "KJNodes/masking" + + def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha): + return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size) + + def smooth_center(self, prev_center, curr_center, alpha=0.5): + return (round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]), + round(alpha * curr_center[1] + (1 - alpha) * prev_center[1])) + + def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha): + bounding_boxes = [] + combined_bounding_box = [] + cropped_images = [] + cropped_masks = [] + cropped_masks_out = [] + 
combined_crop_out = [] + combined_cropped_images = [] + combined_cropped_masks = [] + + def calculate_bbox(mask): + non_zero_indices = np.nonzero(np.array(mask)) + + # handle empty masks + min_x, max_x, min_y, max_y = 0, 0, 0, 0 + if len(non_zero_indices[1]) > 0 and len(non_zero_indices[0]) > 0: + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + + width = max_x - min_x + height = max_y - min_y + bbox_size = max(width, height) + return min_x, max_x, min_y, max_y, bbox_size + + combined_mask = torch.max(masks, dim=0)[0] + _mask = tensor2pil(combined_mask)[0] + new_min_x, new_max_x, new_min_y, new_max_y, combined_bbox_size = calculate_bbox(_mask) + center_x = (new_min_x + new_max_x) / 2 + center_y = (new_min_y + new_max_y) / 2 + half_box_size = round(combined_bbox_size // 2) + new_min_x = max(0, round(center_x - half_box_size)) + new_max_x = min(original_images[0].shape[1], round(center_x + half_box_size)) + new_min_y = max(0, round(center_y - half_box_size)) + new_max_y = min(original_images[0].shape[0], round(center_y + half_box_size)) + + combined_bounding_box.append((new_min_x, new_min_y, new_max_x - new_min_x, new_max_y - new_min_y)) + + self.max_bbox_size = 0 + + # First, calculate the maximum bounding box size across all masks + curr_max_bbox_size = max(calculate_bbox(tensor2pil(mask)[0])[-1] for mask in masks) + # Smooth the changes in the bounding box size + self.max_bbox_size = self.smooth_bbox_size(self.max_bbox_size, curr_max_bbox_size, bbox_smooth_alpha) + # Apply the crop size multiplier + self.max_bbox_size = round(self.max_bbox_size * crop_size_mult) + # Make sure max_bbox_size is divisible by 16, if not, round it upwards so it is + self.max_bbox_size = math.ceil(self.max_bbox_size / 16) * 16 + + if self.max_bbox_size > original_images[0].shape[0] or self.max_bbox_size > original_images[0].shape[1]: + # max_bbox_size can only be as big as our input's width or height, and it has to be even + self.max_bbox_size = math.floor(min(original_images[0].shape[0], original_images[0].shape[1]) / 2) * 2 + + # Then, for each mask and corresponding image... 
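+        # For every frame: locate the mask's bounding box and smoothed center, cut a
+        # square max_bbox_size window around it (clamped to the image) for the
+        # per-frame outputs, and also slice the shared combined-bbox region computed
+        # above; frames with empty masks are passed through unchanged.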
+ for i, (mask, img) in enumerate(zip(masks, original_images)): + _mask = tensor2pil(mask)[0] + non_zero_indices = np.nonzero(np.array(_mask)) + + # check for empty masks + if len(non_zero_indices[0]) > 0 and len(non_zero_indices[1]) > 0: + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + + # Calculate center of bounding box + center_x = np.mean(non_zero_indices[1]) + center_y = np.mean(non_zero_indices[0]) + curr_center = (round(center_x), round(center_y)) + + # If this is the first frame, initialize prev_center with curr_center + if not hasattr(self, 'prev_center'): + self.prev_center = curr_center + + # Smooth the changes in the center coordinates from the second frame onwards + if i > 0: + center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha) + else: + center = curr_center + + # Update prev_center for the next frame + self.prev_center = center + + # Create bounding box using max_bbox_size + half_box_size = self.max_bbox_size // 2 + min_x = max(0, center[0] - half_box_size) + max_x = min(img.shape[1], center[0] + half_box_size) + min_y = max(0, center[1] - half_box_size) + max_y = min(img.shape[0], center[1] + half_box_size) + + # Append bounding box coordinates + bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y)) + + # Crop the image from the bounding box + cropped_img = img[min_y:max_y, min_x:max_x, :] + cropped_mask = mask[min_y:max_y, min_x:max_x] + + # Resize the cropped image to a fixed size + new_size = max(cropped_img.shape[0], cropped_img.shape[1]) + resize_transform = Resize(new_size, interpolation=InterpolationMode.NEAREST, max_size=max(img.shape[0], img.shape[1])) + resized_mask = resize_transform(cropped_mask.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0) + resized_img = resize_transform(cropped_img.permute(2, 0, 1)) + # Perform the center crop to the desired size + # Constrain the crop to the smaller of our bbox or our image so we don't expand past the image dimensions. 
+                crop_transform = CenterCrop((min(self.max_bbox_size, resized_img.shape[1]), min(self.max_bbox_size, resized_img.shape[2])))
+
+                cropped_resized_img = crop_transform(resized_img)
+                cropped_images.append(cropped_resized_img.permute(1, 2, 0))
+
+                cropped_resized_mask = crop_transform(resized_mask)
+                cropped_masks.append(cropped_resized_mask)
+
+                combined_cropped_img = original_images[i][new_min_y:new_max_y, new_min_x:new_max_x, :]
+                combined_cropped_images.append(combined_cropped_img)
+
+                combined_cropped_mask = masks[i][new_min_y:new_max_y, new_min_x:new_max_x]
+                combined_cropped_masks.append(combined_cropped_mask)
+            else:
+                bounding_boxes.append((0, 0, img.shape[1], img.shape[0]))
+                cropped_images.append(img)
+                cropped_masks.append(mask)
+                combined_cropped_images.append(img)
+                combined_cropped_masks.append(mask)
+
+        cropped_out = torch.stack(cropped_images, dim=0)
+        combined_crop_out = torch.stack(combined_cropped_images, dim=0)
+        cropped_masks_out = torch.stack(cropped_masks, dim=0)
+        combined_crop_mask_out = torch.stack(combined_cropped_masks, dim=0)
+
+        return (original_images, cropped_out, cropped_masks_out, combined_crop_out, combined_crop_mask_out, bounding_boxes, combined_bounding_box, self.max_bbox_size, self.max_bbox_size)
+
+class FilterZeroMasksAndCorrespondingImages:
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "masks": ("MASK",),
+            },
+            "optional": {
+                "original_images": ("IMAGE",),
+            },
+        }
+
+    RETURN_TYPES = ("MASK", "IMAGE", "IMAGE", "INDEXES",)
+    RETURN_NAMES = ("non_zero_masks_out", "non_zero_mask_images_out", "zero_mask_images_out", "zero_mask_images_out_indexes",)
+    FUNCTION = "filter"
+    CATEGORY = "KJNodes/masking"
+    DESCRIPTION = """
+Filters out all the empty (i.e. all-zero) masks in masks.
+Also filters out the corresponding images in original_images by index, if provided.
+
+original_images (optional): if provided, must have the same length as masks.
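+
+For example (illustrative): with a batch of 4 masks where masks 1 and 3 are all
+zero, non_zero_masks_out contains masks 0 and 2, zero_mask_images_out contains
+images 1 and 3, and zero_mask_images_out_indexes is [1, 3].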
+""" + + def filter(self, masks, original_images=None): + non_zero_masks = [] + non_zero_mask_images = [] + zero_mask_images = [] + zero_mask_images_indexes = [] + + masks_num = len(masks) + also_process_images = False + if original_images is not None: + imgs_num = len(original_images) + if len(original_images) == masks_num: + also_process_images = True + else: + print(f"[WARNING] ignore input: original_images, due to number of original_images ({imgs_num}) is not equal to number of masks ({masks_num})") + + for i in range(masks_num): + non_zero_num = np.count_nonzero(np.array(masks[i])) + if non_zero_num > 0: + non_zero_masks.append(masks[i]) + if also_process_images: + non_zero_mask_images.append(original_images[i]) + else: + zero_mask_images.append(original_images[i]) + zero_mask_images_indexes.append(i) + + non_zero_masks_out = torch.stack(non_zero_masks, dim=0) + non_zero_mask_images_out = zero_mask_images_out = zero_mask_images_out_indexes = None + + if also_process_images: + non_zero_mask_images_out = torch.stack(non_zero_mask_images, dim=0) + if len(zero_mask_images) > 0: + zero_mask_images_out = torch.stack(zero_mask_images, dim=0) + zero_mask_images_out_indexes = zero_mask_images_indexes + + return (non_zero_masks_out, non_zero_mask_images_out, zero_mask_images_out, zero_mask_images_out_indexes) + +class InsertImageBatchByIndexes: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "images_to_insert": ("IMAGE",), + "insert_indexes": ("INDEXES",), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("images_after_insert", ) + FUNCTION = "insert" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +This node is designed to be use with node FilterZeroMasksAndCorrespondingImages +It inserts the images_to_insert into images according to insert_indexes + +Returns: + images_after_insert: updated original images with origonal sequence order +""" + + def insert(self, images, images_to_insert, insert_indexes): + images_after_insert = images + + if images_to_insert is not None and insert_indexes is not None: + images_to_insert_num = len(images_to_insert) + insert_indexes_num = len(insert_indexes) + if images_to_insert_num == insert_indexes_num: + images_after_insert = [] + + i_images = 0 + for i in range(len(images) + images_to_insert_num): + if i in insert_indexes: + images_after_insert.append(images_to_insert[insert_indexes.index(i)]) + else: + images_after_insert.append(images[i_images]) + i_images += 1 + + images_after_insert = torch.stack(images_after_insert, dim=0) + + else: + print(f"[WARNING] skip this node, due to number of images_to_insert ({images_to_insert_num}) is not equal to number of insert_indexes ({insert_indexes_num})") + + + return (images_after_insert, ) + +class BatchUncropAdvanced: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "cropped_images": ("IMAGE",), + "cropped_masks": ("MASK",), + "combined_crop_mask": ("MASK",), + "bboxes": ("BBOX",), + "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}, ), + "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "use_combined_mask": ("BOOLEAN", {"default": False}), + "use_square_mask": ("BOOLEAN", {"default": True}), + }, + "optional": { + "combined_bounding_box": ("BBOX", {"default": None}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "uncrop" + CATEGORY = "KJNodes/masking" + + + def uncrop(self, original_images, cropped_images, cropped_masks, 
combined_crop_mask, bboxes, border_blending, crop_rescale, use_combined_mask, use_square_mask, combined_bounding_box = None): + + def inset_border(image, border_width=20, border_color=(0)): + width, height = image.size + bordered_image = Image.new(image.mode, (width, height), border_color) + bordered_image.paste(image, (0, 0)) + draw = ImageDraw.Draw(bordered_image) + draw.rectangle((0, 0, width - 1, height - 1), outline=border_color, width=border_width) + return bordered_image + + if len(original_images) != len(cropped_images): + raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same") + + # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images + if len(bboxes) > len(original_images): + print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}") + bboxes = bboxes[:len(original_images)] + elif len(bboxes) < len(original_images): + raise ValueError("There should be at least as many bboxes as there are original and cropped images") + + crop_imgs = tensor2pil(cropped_images) + input_images = tensor2pil(original_images) + out_images = [] + + for i in range(len(input_images)): + img = input_images[i] + crop = crop_imgs[i] + bbox = bboxes[i] + + if use_combined_mask: + bb_x, bb_y, bb_width, bb_height = combined_bounding_box[0] + paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) + mask = combined_crop_mask[i] + else: + bb_x, bb_y, bb_width, bb_height = bbox + paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) + mask = cropped_masks[i] + + # scale paste_region + scale_x = scale_y = crop_rescale + paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y)) + + # rescale the crop image to fit the paste_region + crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1]))) + crop_img = crop.convert("RGB") + + #border blending + if border_blending > 1.0: + border_blending = 1.0 + elif border_blending < 0.0: + border_blending = 0.0 + + blend_ratio = (max(crop_img.size) / 2) * float(border_blending) + blend = img.convert("RGBA") + + if use_square_mask: + mask = Image.new("L", img.size, 0) + mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255) + mask_block = inset_border(mask_block, round(blend_ratio / 2), (0)) + mask.paste(mask_block, paste_region) + else: + original_mask = tensor2pil(mask)[0] + original_mask = original_mask.resize((paste_region[2]-paste_region[0], paste_region[3]-paste_region[1])) + mask = Image.new("L", img.size, 0) + mask.paste(original_mask, paste_region) + + mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4)) + mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4)) + + blend.paste(crop_img, paste_region) + blend.putalpha(mask) + + img = Image.alpha_composite(img.convert("RGBA"), blend) + out_images.append(img.convert("RGB")) + + return (pil2tensor(out_images),) + +class SplitBboxes: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "bboxes": ("BBOX",), + "index": ("INT", {"default": 0,"min": 0, "max": 99999999, "step": 1}), + }, + } + + RETURN_TYPES = ("BBOX","BBOX",) + RETURN_NAMES = ("bboxes_a","bboxes_b",) + FUNCTION = "splitbbox" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Splits the specified bbox list at the given index into two lists. 
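+
+For example (illustrative): with 5 bboxes and index 2, bboxes_a receives the
+first 2 boxes (bboxes[:2]) and bboxes_b the remaining 3 (bboxes[2:]).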
+""" + + def splitbbox(self, bboxes, index): + bboxes_a = bboxes[:index] # Sub-list from the start of bboxes up to (but not including) the index + bboxes_b = bboxes[index:] # Sub-list from the index to the end of bboxes + + return (bboxes_a, bboxes_b,) + +class BboxToInt: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "bboxes": ("BBOX",), + "index": ("INT", {"default": 0,"min": 0, "max": 99999999, "step": 1}), + }, + } + + RETURN_TYPES = ("INT","INT","INT","INT","INT","INT",) + RETURN_NAMES = ("x_min","y_min","width","height", "center_x","center_y",) + FUNCTION = "bboxtoint" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Returns selected index from bounding box list as integers. +""" + def bboxtoint(self, bboxes, index): + x_min, y_min, width, height = bboxes[index] + center_x = int(x_min + width / 2) + center_y = int(y_min + height / 2) + + return (x_min, y_min, width, height, center_x, center_y,) + +class BboxVisualize: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "bboxes": ("BBOX",), + "line_width": ("INT", {"default": 1,"min": 1, "max": 10, "step": 1}), + "bbox_format": (["xywh", "xyxy"], {"default": "xywh"}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "visualizebbox" + DESCRIPTION = """ +Visualizes the specified bbox on the image. +""" + + CATEGORY = "KJNodes/masking" + + def visualizebbox(self, bboxes, images, line_width, bbox_format): + image_list = [] + for image, bbox in zip(images, bboxes): + if bbox_format == "xywh": + x_min, y_min, width, height = bbox + elif bbox_format == "xyxy": + x_min, y_min, x_max, y_max = bbox + width = x_max - x_min + height = y_max - y_min + else: + raise ValueError(f"Unknown bbox_format: {bbox_format}") + + # Ensure bbox coordinates are integers + x_min = int(x_min) + y_min = int(y_min) + width = int(width) + height = int(height) + + # Permute the image dimensions + image = image.permute(2, 0, 1) + + # Clone the image to draw bounding boxes + img_with_bbox = image.clone() + + # Define the color for the bbox, e.g., red + color = torch.tensor([1, 0, 0], dtype=torch.float32) + + # Ensure color tensor matches the image channels + if color.shape[0] != img_with_bbox.shape[0]: + color = color.unsqueeze(1).expand(-1, line_width) + + # Draw lines for each side of the bbox with the specified line width + for lw in range(line_width): + # Top horizontal line + if y_min + lw < img_with_bbox.shape[1]: + img_with_bbox[:, y_min + lw, x_min:x_min + width] = color[:, None] + + # Bottom horizontal line + if y_min + height - lw < img_with_bbox.shape[1]: + img_with_bbox[:, y_min + height - lw, x_min:x_min + width] = color[:, None] + + # Left vertical line + if x_min + lw < img_with_bbox.shape[2]: + img_with_bbox[:, y_min:y_min + height, x_min + lw] = color[:, None] + + # Right vertical line + if x_min + width - lw < img_with_bbox.shape[2]: + img_with_bbox[:, y_min:y_min + height, x_min + width - lw] = color[:, None] + + # Permute the image dimensions back + img_with_bbox = img_with_bbox.permute(1, 2, 0).unsqueeze(0) + image_list.append(img_with_bbox) + + return (torch.cat(image_list, dim=0),) \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/nodes/curve_nodes.py b/custom_nodes/comfyui-kjnodes/nodes/curve_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..77be51f9a8a7a7fcc78b8fdc8ea6c423ed2a185c --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/nodes/curve_nodes.py @@ -0,0 +1,1636 @@ +import torch +from torchvision 
import transforms +import json +from PIL import Image, ImageDraw, ImageFont, ImageColor, ImageFilter, ImageChops +import numpy as np +from ..utility.utility import pil2tensor, tensor2pil +import folder_paths +import io +import base64 + +from comfy.utils import common_upscale + +def parse_color(color): + if isinstance(color, str) and ',' in color: + return tuple(int(c.strip()) for c in color.split(',')) + return color + +def parse_json_tracks(tracks): + tracks_data = [] + try: + # If tracks is a string, try to parse it as JSON + if isinstance(tracks, str): + parsed = json.loads(tracks.replace("'", '"')) + tracks_data.extend(parsed) + else: + # If tracks is a list of strings, parse each one + for track_str in tracks: + parsed = json.loads(track_str.replace("'", '"')) + tracks_data.append(parsed) + + # Check if we have a single track (dict with x,y) or a list of tracks + if tracks_data and isinstance(tracks_data[0], dict) and 'x' in tracks_data[0]: + # Single track detected, wrap it in a list + tracks_data = [tracks_data] + elif tracks_data and isinstance(tracks_data[0], list) and tracks_data[0] and isinstance(tracks_data[0][0], dict) and 'x' in tracks_data[0][0]: + # Already a list of tracks, nothing to do + pass + else: + # Unexpected format + print(f"Warning: Unexpected track format: {type(tracks_data[0])}") + + except json.JSONDecodeError as e: + print(f"Error parsing tracks JSON: {e}") + tracks_data = [] + + return tracks_data + +def plot_coordinates_to_tensor(coordinates, height, width, bbox_height, bbox_width, size_multiplier, prompt): + import matplotlib + matplotlib.use('Agg') + from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas + text_color = '#999999' + bg_color = '#353535' + matplotlib.pyplot.rcParams['text.color'] = text_color + fig, ax = matplotlib.pyplot.subplots(figsize=(width/100, height/100), dpi=100) + fig.patch.set_facecolor(bg_color) + ax.set_facecolor(bg_color) + ax.grid(color=text_color, linestyle='-', linewidth=0.5) + ax.set_xlabel('x', color=text_color) + ax.set_ylabel('y', color=text_color) + for text in ax.get_xticklabels() + ax.get_yticklabels(): + text.set_color(text_color) + ax.set_title('position for: ' + prompt) + ax.set_xlabel('X Coordinate') + ax.set_ylabel('Y Coordinate') + #ax.legend().remove() + ax.set_xlim(0, width) # Set the x-axis to match the input latent width + ax.set_ylim(height, 0) # Set the y-axis to match the input latent height, with (0,0) at top-left + # Adjust the margins of the subplot + matplotlib.pyplot.subplots_adjust(left=0.08, right=0.95, bottom=0.05, top=0.95, wspace=0.2, hspace=0.2) + + cmap = matplotlib.pyplot.get_cmap('rainbow') + image_batch = [] + canvas = FigureCanvas(fig) + width, height = fig.get_size_inches() * fig.get_dpi() + # Draw a box at each coordinate + for i, ((x, y), size) in enumerate(zip(coordinates, size_multiplier)): + color_index = i / (len(coordinates) - 1) + color = cmap(color_index) + draw_height = bbox_height * size + draw_width = bbox_width * size + rect = matplotlib.patches.Rectangle((x - draw_width/2, y - draw_height/2), draw_width, draw_height, + linewidth=1, edgecolor=color, facecolor='none', alpha=0.5) + ax.add_patch(rect) + + # Check if there is a next coordinate to draw an arrow to + if i < len(coordinates) - 1: + x1, y1 = coordinates[i] + x2, y2 = coordinates[i + 1] + ax.annotate("", xy=(x2, y2), xytext=(x1, y1), + arrowprops=dict(arrowstyle="->", + linestyle="-", + lw=1, + color=color, + mutation_scale=20)) + canvas.draw() + image_np = np.frombuffer(canvas.tostring_rgb(), 
dtype='uint8').reshape(int(height), int(width), 3).copy() + image_tensor = torch.from_numpy(image_np).float() / 255.0 + image_tensor = image_tensor.unsqueeze(0) + image_batch.append(image_tensor) + + matplotlib.pyplot.close(fig) + image_batch_tensor = torch.cat(image_batch, dim=0) + + return image_batch_tensor + +class PlotCoordinates: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "coordinates": ("STRING", {"forceInput": True}), + "text": ("STRING", {"default": 'title', "multiline": False}), + "width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "bbox_width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + "bbox_height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + }, + "optional": {"size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True})}, + } + RETURN_TYPES = ("IMAGE", "INT", "INT", "INT", "INT",) + RETURN_NAMES = ("images", "width", "height", "bbox_width", "bbox_height",) + FUNCTION = "append" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Plots coordinates to sequence of images using Matplotlib. + +""" + + def append(self, coordinates, text, width, height, bbox_width, bbox_height, size_multiplier=[1.0]): + coordinates = json.loads(coordinates.replace("'", '"')) + coordinates = [(coord['x'], coord['y']) for coord in coordinates] + batch_size = len(coordinates) + if not size_multiplier or len(size_multiplier) != batch_size: + size_multiplier = [0] * batch_size + else: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + + plot_image_tensor = plot_coordinates_to_tensor(coordinates, height, width, bbox_height, bbox_width, size_multiplier, text) + + return (plot_image_tensor, width, height, bbox_width, bbox_height) + +class SplineEditor: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "points_store": ("STRING", {"multiline": False}), + "coordinates": ("STRING", {"multiline": False}), + "mask_width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "mask_height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "points_to_sample": ("INT", {"default": 16, "min": 2, "max": 1000, "step": 1}), + "sampling_method": ( + [ + 'path', + 'time', + 'controlpoints', + 'speed' + ], + { + "default": 'time' + }), + "interpolation": ( + [ + 'cardinal', + 'monotone', + 'basis', + 'linear', + 'step-before', + 'step-after', + 'polar', + 'polar-reverse', + ], + { + "default": 'cardinal' + }), + "tension": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "repeat_output": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}), + "float_output_type": ( + [ + 'list', + 'pandas series', + 'tensor', + ], + { + "default": 'list' + }), + }, + "optional": { + "min_value": ("FLOAT", {"default": 0.0, "min": -10000.0, "max": 10000.0, "step": 0.01}), + "max_value": ("FLOAT", {"default": 1.0, "min": -10000.0, "max": 10000.0, "step": 0.01}), + "bg_image": ("IMAGE", ), + } + } + + RETURN_TYPES = ("MASK", "STRING", "FLOAT", "INT", "STRING",) + RETURN_NAMES = ("mask", "coord_str", "float", "count", "normalized_str",) + FUNCTION = "splinedata" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +# WORK IN PROGRESS +Do not count on this as part of your workflow yet, +probably contains lots of bugs and stability is not +guaranteed!! + +## Graphical editor to create values for various +## schedules and/or mask batches. 
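+
+Sampled y-positions are mapped linearly so that the top of the canvas equals
+max_value and the bottom equals min_value; roughly (sketch of the mapping):
+
+    norm_y = (1 - y / mask_height) * (max_value - min_value) + min_value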
+
+**Shift + click** to add control point at end.
+**Ctrl + click** to add control point (subdivide) between two points.
+**Right click on a point** to delete it.
+Note that you can't delete from start/end.
+
+Right click on canvas for context menu:
+NEW!:
+- Add new spline
+    - Creates a new spline on the same canvas; currently these paths are only
+      output as coordinates.
+- Add single point
+    - Creates a single point that only returns its current position coords
+- Delete spline
+    - Deletes the currently selected spline; you can select a spline by clicking on
+      its path, or cycle through them with the 'Next spline' -option.
+
+These are purely visual options and don't affect the output:
+ - Toggle handles visibility
+ - Display sample points: display the points to be returned.
+
+**points_to_sample** value sets the number of samples
+returned from the **drawn spline itself**; this is independent of the
+actual control points, so the interpolation type matters.
+sampling_method:
+ - time: samples along the time axis, used for schedules
+ - path: samples along the path itself, useful for coordinates
+ - controlpoints: samples only the control points themselves
+
+output types:
+ - mask batch
+        example compatible nodes: anything that takes masks
+ - list of floats
+        example compatible nodes: IPAdapter weights
+ - pandas series
+        example compatible nodes: anything that takes Fizz'
+        nodes Batch Value Schedule
+ - torch tensor
+        example compatible nodes: unknown
+"""
+
+    def splinedata(self, mask_width, mask_height, coordinates, float_output_type, interpolation,
+                   points_to_sample, sampling_method, points_store, tension, repeat_output,
+                   min_value=0.0, max_value=1.0, bg_image=None):
+
+        coordinates = json.loads(coordinates)
+
+        # Handle nested list structure if present
+        all_normalized = []
+        all_normalized_y_values = []
+
+        # Check if we have a nested list structure
+        if isinstance(coordinates, list) and len(coordinates) > 0 and isinstance(coordinates[0], list):
+            # Process each list of coordinates in the nested structure
+            coordinate_sets = coordinates
+        else:
+            # If not nested, treat as a single list of coordinates
+            coordinate_sets = [coordinates]
+
+        # Process each set of coordinates
+        for coord_set in coordinate_sets:
+            normalized = []
+            normalized_y_values = []
+
+            for coord in coord_set:
+                coord['x'] = int(round(coord['x']))
+                coord['y'] = int(round(coord['y']))
+                norm_x = (1.0 - (coord['x'] / mask_height) - 0.0) * (max_value - min_value) + min_value
+                norm_y = (1.0 - (coord['y'] / mask_height) - 0.0) * (max_value - min_value) + min_value
+                normalized_y_values.append(norm_y)
+                normalized.append({'x':norm_x, 'y':norm_y})
+
+            all_normalized.extend(normalized)
+            all_normalized_y_values.extend(normalized_y_values)
+
+        # Use the combined normalized values for output
+        if float_output_type == 'list':
+            out_floats = all_normalized_y_values * repeat_output
+        elif float_output_type == 'pandas series':
+            try:
+                import pandas as pd
+            except:
+                raise Exception("SplineEditor: pandas is not installed.
Please install pandas to use this output_type") + out_floats = pd.Series(all_normalized_y_values * repeat_output), + elif float_output_type == 'tensor': + out_floats = torch.tensor(all_normalized_y_values * repeat_output, dtype=torch.float32) + + # Create a color map for grayscale intensities + color_map = lambda y: torch.full((mask_height, mask_width, 3), y, dtype=torch.float32) + + # Create image tensors for each normalized y value + mask_tensors = [color_map(y) for y in all_normalized_y_values] + masks_out = torch.stack(mask_tensors) + masks_out = masks_out.repeat(repeat_output, 1, 1, 1) + masks_out = masks_out.mean(dim=-1) + + if bg_image is None: + return (masks_out, json.dumps(coordinates if len(coordinates) > 1 else coordinates[0]), out_floats, len(out_floats), json.dumps(all_normalized)) + else: + transform = transforms.ToPILImage() + image = transform(bg_image[0].permute(2, 0, 1)) + buffered = io.BytesIO() + image.save(buffered, format="JPEG", quality=75) + + # Encode the image bytes to a Base64 string + img_bytes = buffered.getvalue() + img_base64 = base64.b64encode(img_bytes).decode('utf-8') + + return { + "ui": {"bg_image": [img_base64]}, + "result": (masks_out, json.dumps(coordinates if len(coordinates) > 1 else coordinates[0]), out_floats, len(out_floats), json.dumps(all_normalized)) + } + + +class CreateShapeMaskOnPath: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createshapemask" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Creates a mask or batch of masks with the specified shape. +Locations are center locations. +""" + DEPRECATED = True + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "shape": ( + [ 'circle', + 'square', + 'triangle', + ], + { + "default": 'circle' + }), + "coordinates": ("STRING", {"forceInput": True}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "shape_width": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + "shape_height": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + }, + "optional": { + "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}), + } + } + + def createshapemask(self, coordinates, frame_width, frame_height, shape_width, shape_height, shape, size_multiplier=[1.0]): + # Define the number of images in the batch + coordinates = coordinates.replace("'", '"') + coordinates = json.loads(coordinates) + + batch_size = len(coordinates) + out = [] + color = "white" + if not size_multiplier or len(size_multiplier) != batch_size: + size_multiplier = [0] * batch_size + else: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + for i, coord in enumerate(coordinates): + image = Image.new("RGB", (frame_width, frame_height), "black") + draw = ImageDraw.Draw(image) + + # Calculate the size for this frame and ensure it's not less than 0 + current_width = max(0, shape_width + i * size_multiplier[i]) + current_height = max(0, shape_height + i * size_multiplier[i]) + + location_x = coord['x'] + location_y = coord['y'] + + if shape == 'circle' or shape == 'square': + # Define the bounding box for the shape + left_up_point = (location_x - current_width // 2, location_y - current_height // 2) + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) + two_points = [left_up_point, right_down_point] + + if shape == 
'circle': + draw.ellipse(two_points, fill=color) + elif shape == 'square': + draw.rectangle(two_points, fill=color) + + elif shape == 'triangle': + # Define the points for the triangle + left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right + top_point = (location_x, location_y - current_height // 2) # top point + draw.polygon([top_point, left_up_point, right_down_point], fill=color) + + image = pil2tensor(image) + mask = image[:, :, :, 0] + out.append(mask) + outstack = torch.cat(out, dim=0) + return (outstack, 1.0 - outstack,) + + + +class CreateShapeImageOnPath: + + RETURN_TYPES = ("IMAGE", "MASK",) + RETURN_NAMES = ("image","mask", ) + FUNCTION = "createshapemask" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates an image or batch of images with the specified shape. +Locations are center locations. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "shape": ( + [ 'circle', + 'square', + 'triangle', + ], + { + "default": 'circle' + }), + "coordinates": ("STRING", {"forceInput": True}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "shape_width": ("INT", {"default": 128,"min": 2, "max": 4096, "step": 1}), + "shape_height": ("INT", {"default": 128,"min": 2, "max": 4096, "step": 1}), + "shape_color": ("STRING", {"default": 'white'}), + "bg_color": ("STRING", {"default": 'black'}), + "blur_radius": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1}), + "intensity": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0, "step": 0.01}), + }, + "optional": { + "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}), + "trailing": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "border_width": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}), + "border_color": ("STRING", {"default": 'black'}), + } + } + + def createshapemask(self, coordinates, frame_width, frame_height, shape_width, shape_height, shape_color, + bg_color, blur_radius, shape, intensity, size_multiplier=[1.0], trailing=1.0, border_width=0, border_color='black'): + + shape_color = parse_color(shape_color) + border_color = parse_color(border_color) + bg_color = parse_color(bg_color) + coords_list = parse_json_tracks(coordinates) + + batch_size = len(coords_list[0]) + images_list = [] + masks_list = [] + + if not size_multiplier or len(size_multiplier) != batch_size: + size_multiplier = [1] * batch_size + else: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + + previous_output = None + + for i in range(batch_size): + image = Image.new("RGB", (frame_width, frame_height), bg_color) + draw = ImageDraw.Draw(image) + + # Calculate the size for this frame and ensure it's not less than 0 + current_width = shape_width * size_multiplier[i] + current_height = shape_height * size_multiplier[i] + + for coords in coords_list: + location_x = coords[i]['x'] + location_y = coords[i]['y'] + + if shape == 'circle' or shape == 'square': + # Define the bounding box for the shape + left_up_point = (location_x - current_width // 2, location_y - current_height // 2) + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) + two_points = [left_up_point, right_down_point] + + if shape == 'circle': + if 
border_width > 0: + draw.ellipse(two_points, fill=shape_color, outline=border_color, width=border_width) + else: + draw.ellipse(two_points, fill=shape_color) + elif shape == 'square': + if border_width > 0: + draw.rectangle(two_points, fill=shape_color, outline=border_color, width=border_width) + else: + draw.rectangle(two_points, fill=shape_color) + + elif shape == 'triangle': + # Define the points for the triangle + left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right + top_point = (location_x, location_y - current_height // 2) # top point + + if border_width > 0: + draw.polygon([top_point, left_up_point, right_down_point], fill=shape_color, outline=border_color, width=border_width) + else: + draw.polygon([top_point, left_up_point, right_down_point], fill=shape_color) + + if blur_radius != 0: + image = image.filter(ImageFilter.GaussianBlur(blur_radius)) + # Blend the current image with the accumulated image + + image = pil2tensor(image) + if trailing != 1.0 and previous_output is not None: + # Add the decayed previous output to the current frame + image += trailing * previous_output + image = image / image.max() + previous_output = image + image = image * intensity + mask = image[:, :, :, 0] + masks_list.append(mask) + images_list.append(image) + out_images = torch.cat(images_list, dim=0).cpu().float() + out_masks = torch.cat(masks_list, dim=0) + return (out_images, out_masks) + +class CreateTextOnPath: + + RETURN_TYPES = ("IMAGE", "MASK", "MASK",) + RETURN_NAMES = ("image", "mask", "mask_inverted",) + FUNCTION = "createtextmask" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Creates a mask or batch of masks with the specified text. +Locations are center locations. 
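+
+The coordinates input is a JSON list of center points, one per frame, e.g.
+(illustrative): [{"x": 256, "y": 256}, {"x": 300, "y": 256}] yields a batch
+of two frames.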
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "coordinates": ("STRING", {"forceInput": True}), + "text": ("STRING", {"default": 'text', "multiline": True}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "font_size": ("INT", {"default": 42}), + "alignment": ( + [ 'left', + 'center', + 'right' + ], + {"default": 'center'} + ), + "text_color": ("STRING", {"default": 'white'}), + }, + "optional": { + "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}), + } + } + + def createtextmask(self, coordinates, frame_width, frame_height, font, font_size, text, text_color, alignment, size_multiplier=[1.0]): + coordinates = coordinates.replace("'", '"') + coordinates = json.loads(coordinates) + + batch_size = len(coordinates) + mask_list = [] + image_list = [] + color = parse_color(text_color) + font_path = folder_paths.get_full_path("kjnodes_fonts", font) + + if len(size_multiplier) != batch_size: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + + for i, coord in enumerate(coordinates): + image = Image.new("RGB", (frame_width, frame_height), "black") + draw = ImageDraw.Draw(image) + lines = text.split('\n') # Split the text into lines + # Apply the size multiplier to the font size for this iteration + current_font_size = int(font_size * size_multiplier[i]) + current_font = ImageFont.truetype(font_path, current_font_size) + line_heights = [current_font.getbbox(line)[3] for line in lines] # List of line heights + total_text_height = sum(line_heights) # Total height of text block + + # Calculate the starting Y position to center the block of text + start_y = coord['y'] - total_text_height // 2 + for j, line in enumerate(lines): + text_width, text_height = current_font.getbbox(line)[2], line_heights[j] + if alignment == 'left': + location_x = coord['x'] + elif alignment == 'center': + location_x = int(coord['x'] - text_width // 2) + elif alignment == 'right': + location_x = int(coord['x'] - text_width) + + location_y = int(start_y + sum(line_heights[:j])) + text_position = (location_x, location_y) + # Draw the text + try: + draw.text(text_position, line, fill=color, font=current_font, features=['-liga']) + except: + draw.text(text_position, line, fill=color, font=current_font) + + image = pil2tensor(image) + non_black_pixels = (image > 0).any(dim=-1) + mask = non_black_pixels.to(image.dtype) + mask_list.append(mask) + image_list.append(image) + + out_images = torch.cat(image_list, dim=0).cpu().float() + out_masks = torch.cat(mask_list, dim=0) + return (out_images, out_masks, 1.0 - out_masks,) + +class CreateGradientFromCoords: + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("image", ) + FUNCTION = "generate" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates a gradient image from coordinates. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "coordinates": ("STRING", {"forceInput": True}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "start_color": ("STRING", {"default": 'white'}), + "end_color": ("STRING", {"default": 'black'}), + "multiplier": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0, "step": 0.01}), + }, + } + + def generate(self, coordinates, frame_width, frame_height, start_color, end_color, multiplier): + # Parse the coordinates + coordinates = json.loads(coordinates.replace("'", '"')) + + # Create an image + image = Image.new("RGB", (frame_width, frame_height)) + draw = ImageDraw.Draw(image) + + # Extract start and end points for the gradient + start_coord = coordinates[0] + end_coord = coordinates[1] + + start_color = parse_color(start_color) + end_color = parse_color(end_color) + + # Calculate the gradient direction (vector) + gradient_direction = (end_coord['x'] - start_coord['x'], end_coord['y'] - start_coord['y']) + gradient_length = (gradient_direction[0] ** 2 + gradient_direction[1] ** 2) ** 0.5 + + # Iterate over each pixel in the image + for y in range(frame_height): + for x in range(frame_width): + # Calculate the projection of the point on the gradient line + point_vector = (x - start_coord['x'], y - start_coord['y']) + projection = (point_vector[0] * gradient_direction[0] + point_vector[1] * gradient_direction[1]) / gradient_length + projection = max(min(projection, gradient_length), 0) # Clamp the projection value + + # Calculate the blend factor for the current pixel + blend = projection * multiplier / gradient_length + + # Determine the color of the current pixel + color = ( + int(start_color[0] + (end_color[0] - start_color[0]) * blend), + int(start_color[1] + (end_color[1] - start_color[1]) * blend), + int(start_color[2] + (end_color[2] - start_color[2]) * blend) + ) + + # Set the pixel color + draw.point((x, y), fill=color) + + # Convert the PIL image to a tensor (assuming such a function exists in your context) + image_tensor = pil2tensor(image) + + return (image_tensor,) + +class GradientToFloat: + + RETURN_TYPES = ("FLOAT", "FLOAT",) + RETURN_NAMES = ("float_x", "float_y", ) + FUNCTION = "sample" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Calculates list of floats from image. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "steps": ("INT", {"default": 10, "min": 2, "max": 10000, "step": 1}), + }, + } + + def sample(self, image, steps): + # Assuming image is a tensor with shape [B, H, W, C] + B, H, W, C = image.shape + + # Sample along the width axis (W) + w_intervals = torch.linspace(0, W - 1, steps=steps, dtype=torch.int64) + # Assuming we're sampling from the first batch and the first channel + w_sampled = image[0, :, w_intervals, 0] + + # Sample along the height axis (H) + h_intervals = torch.linspace(0, H - 1, steps=steps, dtype=torch.int64) + # Assuming we're sampling from the first batch and the first channel + h_sampled = image[0, h_intervals, :, 0] + + # Taking the mean across the height for width sampling, and across the width for height sampling + w_values = w_sampled.mean(dim=0).tolist() + h_values = h_sampled.mean(dim=1).tolist() + + return (w_values, h_values) + +class MaskOrImageToWeight: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "output_type": ( + [ + 'list', + 'pandas series', + 'tensor', + 'string' + ], + { + "default": 'list' + }), + }, + "optional": { + "images": ("IMAGE",), + "masks": ("MASK",), + }, + + } + RETURN_TYPES = ("FLOAT", "STRING",) + FUNCTION = "execute" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +Gets the mean values from mask or image batch +and returns that as the selected output type. +""" + + def execute(self, output_type, images=None, masks=None): + mean_values = [] + if masks is not None and images is None: + for mask in masks: + mean_values.append(mask.mean().item()) + elif masks is None and images is not None: + for image in images: + mean_values.append(image.mean().item()) + elif masks is not None and images is not None: + raise Exception("MaskOrImageToWeight: Use either mask or image input only.") + + # Convert mean_values to the specified output_type + if output_type == 'list': + out = mean_values + elif output_type == 'pandas series': + try: + import pandas as pd + except: + raise Exception("MaskOrImageToWeight: pandas is not installed. Please install pandas to use this output_type") + out = pd.Series(mean_values), + elif output_type == 'tensor': + out = torch.tensor(mean_values, dtype=torch.float32), + return (out, [str(value) for value in mean_values],) + +class WeightScheduleConvert: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_values": ("FLOAT", {"default": 0.0, "forceInput": True}), + "output_type": ( + [ + 'match_input', + 'list', + 'pandas series', + 'tensor', + ], + { + "default": 'list' + }), + "invert": ("BOOLEAN", {"default": False}), + "repeat": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + }, + "optional": { + "remap_to_frames": ("INT", {"default": 0}), + "interpolation_curve": ("FLOAT", {"forceInput": True}), + "remap_values": ("BOOLEAN", {"default": False}), + "remap_min": ("FLOAT", {"default": 0.0, "min": -100000, "max": 100000.0, "step": 0.01}), + "remap_max": ("FLOAT", {"default": 1.0, "min": -100000, "max": 100000.0, "step": 0.01}), + }, + + } + RETURN_TYPES = ("FLOAT", "STRING", "INT",) + FUNCTION = "execute" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +Converts different value lists/series to another type. 
+""" + + def detect_input_type(self, input_values): + import pandas as pd + if isinstance(input_values, list): + return 'list' + elif isinstance(input_values, pd.Series): + return 'pandas series' + elif isinstance(input_values, torch.Tensor): + return 'tensor' + else: + raise ValueError("Unsupported input type") + + def execute(self, input_values, output_type, invert, repeat, remap_to_frames=0, interpolation_curve=None, remap_min=0.0, remap_max=1.0, remap_values=False): + import pandas as pd + input_type = self.detect_input_type(input_values) + + if input_type == 'pandas series': + float_values = input_values.tolist() + elif input_type == 'tensor': + float_values = input_values + else: + float_values = input_values + + if invert: + float_values = [1 - value for value in float_values] + + if interpolation_curve is not None: + interpolated_pattern = [] + orig_float_values = float_values + for value in interpolation_curve: + min_val = min(orig_float_values) + max_val = max(orig_float_values) + # Normalize the values to [0, 1] + normalized_values = [(value - min_val) / (max_val - min_val) for value in orig_float_values] + # Interpolate the normalized values to the new frame count + remapped_float_values = np.interp(np.linspace(0, 1, int(remap_to_frames * value)), np.linspace(0, 1, len(normalized_values)), normalized_values).tolist() + interpolated_pattern.extend(remapped_float_values) + float_values = interpolated_pattern + else: + # Remap float_values to match target_frame_amount + if remap_to_frames > 0 and remap_to_frames != len(float_values): + min_val = min(float_values) + max_val = max(float_values) + # Normalize the values to [0, 1] + normalized_values = [(value - min_val) / (max_val - min_val) for value in float_values] + # Interpolate the normalized values to the new frame count + float_values = np.interp(np.linspace(0, 1, remap_to_frames), np.linspace(0, 1, len(normalized_values)), normalized_values).tolist() + + float_values = float_values * repeat + if remap_values: + float_values = self.remap_values(float_values, remap_min, remap_max) + + if output_type == 'list': + out = float_values, + elif output_type == 'pandas series': + out = pd.Series(float_values), + elif output_type == 'tensor': + if input_type == 'pandas series': + out = torch.tensor(float_values.values, dtype=torch.float32), + else: + out = torch.tensor(float_values, dtype=torch.float32), + elif output_type == 'match_input': + out = float_values, + return (out, [str(value) for value in float_values], [int(value) for value in float_values]) + + def remap_values(self, values, target_min, target_max): + # Determine the current range + current_min = min(values) + current_max = max(values) + current_range = current_max - current_min + + # Determine the target range + target_range = target_max - target_min + + # Perform the linear interpolation for each value + remapped_values = [(value - current_min) / current_range * target_range + target_min for value in values] + + return remapped_values + + +class FloatToMask: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_values": ("FLOAT", {"forceInput": True, "default": 0}), + "width": ("INT", {"default": 100, "min": 1}), + "height": ("INT", {"default": 100, "min": 1}), + }, + } + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Generates a batch of masks based on the input float values. +The batch size is determined by the length of the input float values. 
+Each mask is generated with the specified width and height. +""" + + def execute(self, input_values, width, height): + import pandas as pd + # Ensure input_values is a list + if isinstance(input_values, (float, int)): + input_values = [input_values] + elif isinstance(input_values, pd.Series): + input_values = input_values.tolist() + elif isinstance(input_values, list) and all(isinstance(item, list) for item in input_values): + input_values = [item for sublist in input_values for item in sublist] + + # Generate a batch of masks based on the input_values + masks = [] + for value in input_values: + # Assuming value is a float between 0 and 1 representing the mask's intensity + mask = torch.ones((height, width), dtype=torch.float32) * value + masks.append(mask) + masks_out = torch.stack(masks, dim=0) + + return(masks_out,) +class WeightScheduleExtend: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_values_1": ("FLOAT", {"default": 0.0, "forceInput": True}), + "input_values_2": ("FLOAT", {"default": 0.0, "forceInput": True}), + "output_type": ( + [ + 'match_input', + 'list', + 'pandas series', + 'tensor', + ], + { + "default": 'match_input' + }), + }, + + } + RETURN_TYPES = ("FLOAT",) + FUNCTION = "execute" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +Extends, and converts if needed, different value lists/series +""" + + def detect_input_type(self, input_values): + import pandas as pd + if isinstance(input_values, list): + return 'list' + elif isinstance(input_values, pd.Series): + return 'pandas series' + elif isinstance(input_values, torch.Tensor): + return 'tensor' + else: + raise ValueError("Unsupported input type") + + def execute(self, input_values_1, input_values_2, output_type): + import pandas as pd + input_type_1 = self.detect_input_type(input_values_1) + input_type_2 = self.detect_input_type(input_values_2) + # Convert input_values_2 to the same format as input_values_1 if they do not match + if not input_type_1 == input_type_2: + print("Converting input_values_2 to the same format as input_values_1") + if input_type_1 == 'pandas series': + # Convert input_values_2 to a pandas Series + float_values_2 = pd.Series(input_values_2) + elif input_type_1 == 'tensor': + # Convert input_values_2 to a tensor + float_values_2 = torch.tensor(input_values_2, dtype=torch.float32) + else: + print("Input types match, no conversion needed") + # If the types match, no conversion is needed + float_values_2 = input_values_2 + + float_values = input_values_1 + float_values_2 + + if output_type == 'list': + return float_values, + elif output_type == 'pandas series': + return pd.Series(float_values), + elif output_type == 'tensor': + if input_type_1 == 'pandas series': + return torch.tensor(float_values.values, dtype=torch.float32), + else: + return torch.tensor(float_values, dtype=torch.float32), + elif output_type == 'match_input': + return float_values, + else: + raise ValueError(f"Unsupported output_type: {output_type}") + +class FloatToSigmas: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "float_list": ("FLOAT", {"default": 0.0, "forceInput": True}), + } + } + RETURN_TYPES = ("SIGMAS",) + RETURN_NAMES = ("SIGMAS",) + CATEGORY = "KJNodes/noise" + FUNCTION = "customsigmas" + DESCRIPTION = """ +Creates a sigmas tensor from list of float values. 
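# A minimal sketch of what FloatToSigmas produces: a 1-D float32 tensor that a
# sampler can consume as a custom sigma schedule. The values below are only an
# example schedule, not one recommended by this node pack.
import torch

float_list = [14.61, 7.49, 3.86, 1.98, 1.02, 0.52, 0.27, 0.0]
sigmas = torch.tensor(float_list, dtype=torch.float32)
print(sigmas.shape, sigmas[0].item(), sigmas[-1].item())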
+ +""" + def customsigmas(self, float_list): + return torch.tensor(float_list, dtype=torch.float32), + +class SigmasToFloat: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "sigmas": ("SIGMAS",), + } + } + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("float",) + CATEGORY = "KJNodes/noise" + FUNCTION = "customsigmas" + DESCRIPTION = """ +Creates a float list from sigmas tensors. + +""" + def customsigmas(self, sigmas): + return sigmas.tolist(), + +class GLIGENTextBoxApplyBatchCoords: + @classmethod + def INPUT_TYPES(s): + return {"required": {"conditioning_to": ("CONDITIONING", ), + "latents": ("LATENT", ), + "clip": ("CLIP", ), + "gligen_textbox_model": ("GLIGEN", ), + "coordinates": ("STRING", {"forceInput": True}), + "text": ("STRING", {"multiline": True}), + "width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + "height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + }, + "optional": {"size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True})}, + } + RETURN_TYPES = ("CONDITIONING", "IMAGE", ) + RETURN_NAMES = ("conditioning", "coord_preview", ) + FUNCTION = "append" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +This node allows scheduling GLIGEN text box positions in a batch, +to be used with AnimateDiff-Evolved. Intended to pair with the +Spline Editor -node. + +GLIGEN model can be downloaded through the Manage's "Install Models" menu. +Or directly from here: +https://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors/tree/main + +Inputs: +- **latents** input is used to calculate batch size +- **clip** is your standard text encoder, use same as for the main prompt +- **gligen_textbox_model** connects to GLIGEN Loader +- **coordinates** takes a json string of points, directly compatible +with the spline editor node. +- **text** is the part of the prompt to set position for +- **width** and **height** are the size of the GLIGEN bounding box + +Outputs: +- **conditioning** goes between to clip text encode and the sampler +- **coord_preview** is an optional preview of the coordinates and +bounding boxes. 
+ +""" + + def append(self, latents, coordinates, conditioning_to, clip, gligen_textbox_model, text, width, height, size_multiplier=[1.0]): + coordinates = json.loads(coordinates.replace("'", '"')) + coordinates = [(coord['x'], coord['y']) for coord in coordinates] + + batch_size = sum(tensor.size(0) for tensor in latents.values()) + if len(coordinates) != batch_size: + print("GLIGENTextBoxApplyBatchCoords WARNING: The number of coordinates does not match the number of latents") + + c = [] + _, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True) + + for t in conditioning_to: + n = [t[0], t[1].copy()] + + position_params_batch = [[] for _ in range(batch_size)] # Initialize a list of empty lists for each batch item + if len(size_multiplier) != batch_size: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + + for i in range(batch_size): + x_position, y_position = coordinates[i] + position_param = (cond_pooled, int((height // 8) * size_multiplier[i]), int((width // 8) * size_multiplier[i]), (y_position - height // 2) // 8, (x_position - width // 2) // 8) + position_params_batch[i].append(position_param) # Append position_param to the correct sublist + + prev = [] + if "gligen" in n[1]: + prev = n[1]['gligen'][2] + else: + prev = [[] for _ in range(batch_size)] + # Concatenate prev and position_params_batch, ensuring both are lists of lists + # and each sublist corresponds to a batch item + combined_position_params = [prev_item + batch_item for prev_item, batch_item in zip(prev, position_params_batch)] + n[1]['gligen'] = ("position_batched", gligen_textbox_model, combined_position_params) + c.append(n) + + image_height = latents['samples'].shape[-2] * 8 + image_width = latents['samples'].shape[-1] * 8 + plot_image_tensor = plot_coordinates_to_tensor(coordinates, image_height, image_width, height, width, size_multiplier, text) + + return (c, plot_image_tensor,) + +class CreateInstanceDiffusionTracking: + + RETURN_TYPES = ("TRACKING", "STRING", "INT", "INT", "INT", "INT",) + RETURN_NAMES = ("tracking", "prompt", "width", "height", "bbox_width", "bbox_height",) + FUNCTION = "tracking" + CATEGORY = "KJNodes/InstanceDiffusion" + DESCRIPTION = """ +Creates tracking data to be used with InstanceDiffusion: +https://github.com/logtd/ComfyUI-InstanceDiffusion + +InstanceDiffusion prompt format: +"class_id.class_name": "prompt", +for example: +"1.head": "((head))", +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "coordinates": ("STRING", {"forceInput": True}), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "bbox_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "bbox_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "class_name": ("STRING", {"default": "class_name"}), + "class_id": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "prompt": ("STRING", {"default": "prompt", "multiline": True}), + }, + "optional": { + "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}), + "fit_in_frame": ("BOOLEAN", {"default": True}), + } + } + + def tracking(self, coordinates, class_name, class_id, width, height, bbox_width, bbox_height, prompt, size_multiplier=[1.0], fit_in_frame=True): + # Define the number of images in the batch + coordinates = coordinates.replace("'", '"') + coordinates = json.loads(coordinates) + + tracked 
= {} + tracked[class_name] = {} + batch_size = len(coordinates) + # Initialize a list to hold the coordinates for the current ID + id_coordinates = [] + if not size_multiplier or len(size_multiplier) != batch_size: + size_multiplier = [0] * batch_size + else: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + for i, coord in enumerate(coordinates): + x = coord['x'] + y = coord['y'] + adjusted_bbox_width = bbox_width * size_multiplier[i] + adjusted_bbox_height = bbox_height * size_multiplier[i] + # Calculate the top left and bottom right coordinates + top_left_x = x - adjusted_bbox_width // 2 + top_left_y = y - adjusted_bbox_height // 2 + bottom_right_x = x + adjusted_bbox_width // 2 + bottom_right_y = y + adjusted_bbox_height // 2 + + if fit_in_frame: + # Clip the coordinates to the frame boundaries + top_left_x = max(0, top_left_x) + top_left_y = max(0, top_left_y) + bottom_right_x = min(width, bottom_right_x) + bottom_right_y = min(height, bottom_right_y) + # Ensure width and height are positive + adjusted_bbox_width = max(1, bottom_right_x - top_left_x) + adjusted_bbox_height = max(1, bottom_right_y - top_left_y) + + # Update the coordinates with the new width and height + bottom_right_x = top_left_x + adjusted_bbox_width + bottom_right_y = top_left_y + adjusted_bbox_height + + # Append the top left and bottom right coordinates to the list for the current ID + id_coordinates.append([top_left_x, top_left_y, bottom_right_x, bottom_right_y, width, height]) + + class_id = int(class_id) + # Assign the list of coordinates to the specified ID within the class_id dictionary + tracked[class_name][class_id] = id_coordinates + + prompt_string = "" + for class_name, class_data in tracked.items(): + for class_id in class_data.keys(): + class_id_str = str(class_id) + # Use the incoming prompt for each class name and ID + prompt_string += f'"{class_id_str}.{class_name}": "({prompt})",\n' + + # Remove the last comma and newline + prompt_string = prompt_string.rstrip(",\n") + + return (tracked, prompt_string, width, height, bbox_width, bbox_height) + +class AppendInstanceDiffusionTracking: + + RETURN_TYPES = ("TRACKING", "STRING",) + RETURN_NAMES = ("tracking", "prompt",) + FUNCTION = "append" + CATEGORY = "KJNodes/InstanceDiffusion" + DESCRIPTION = """ +Appends tracking data to be used with InstanceDiffusion: +https://github.com/logtd/ComfyUI-InstanceDiffusion + +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "tracking_1": ("TRACKING", {"forceInput": True}), + "tracking_2": ("TRACKING", {"forceInput": True}), + }, + "optional": { + "prompt_1": ("STRING", {"default": "", "forceInput": True}), + "prompt_2": ("STRING", {"default": "", "forceInput": True}), + } + } + + def append(self, tracking_1, tracking_2, prompt_1="", prompt_2=""): + tracking_copy = tracking_1.copy() + # Check for existing class names and class IDs, and raise an error if they exist + for class_name, class_data in tracking_2.items(): + if class_name not in tracking_copy: + tracking_copy[class_name] = class_data + else: + # If the class name exists, merge the class data from tracking_2 into tracking_copy + # This will add new class IDs under the same class name without raising an error + tracking_copy[class_name].update(class_data) + prompt_string = prompt_1 + "," + prompt_2 + return (tracking_copy, prompt_string) + +class InterpolateCoords: + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("coordinates",) + FUNCTION = "interpolate" + 
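# A minimal sketch of how CreateInstanceDiffusionTracking above turns a point
# into a bounding box: the point is treated as the box center, scaled by
# size_multiplier, then clipped to the frame. center_to_bbox is a hypothetical
# helper, not part of the node.
def center_to_bbox(x, y, bbox_w, bbox_h, frame_w, frame_h, mult=1.0):
    w, h = bbox_w * mult, bbox_h * mult
    x1, y1 = max(0, x - w // 2), max(0, y - h // 2)
    x2, y2 = min(frame_w, x + w // 2), min(frame_h, y + h // 2)
    return [x1, y1, x2, y2, frame_w, frame_h]

print(center_to_bbox(256, 256, 128, 128, 512, 512))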
CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Interpolates coordinates based on a curve. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "coordinates": ("STRING", {"forceInput": True}), + "interpolation_curve": ("FLOAT", {"forceInput": True}), + + }, + } + + def interpolate(self, coordinates, interpolation_curve): + # Parse the JSON string to get the list of coordinates + coordinates = json.loads(coordinates.replace("'", '"')) + + # Convert the list of dictionaries to a list of (x, y) tuples for easier processing + coordinates = [(coord['x'], coord['y']) for coord in coordinates] + + # Calculate the total length of the original path + path_length = sum(np.linalg.norm(np.array(coordinates[i]) - np.array(coordinates[i-1])) + for i in range(1, len(coordinates))) + + # Initialize variables for interpolation + interpolated_coords = [] + current_length = 0 + current_index = 0 + + # Iterate over the normalized curve + for normalized_length in interpolation_curve: + target_length = normalized_length * path_length # Convert to the original scale + while current_index < len(coordinates) - 1: + segment_start, segment_end = np.array(coordinates[current_index]), np.array(coordinates[current_index + 1]) + segment_length = np.linalg.norm(segment_end - segment_start) + if current_length + segment_length >= target_length: + break + current_length += segment_length + current_index += 1 + + # Interpolate between the last two points + if current_index < len(coordinates) - 1: + p1, p2 = np.array(coordinates[current_index]), np.array(coordinates[current_index + 1]) + segment_length = np.linalg.norm(p2 - p1) + if segment_length > 0: + t = (target_length - current_length) / segment_length + interpolated_point = p1 + t * (p2 - p1) + interpolated_coords.append(interpolated_point.tolist()) + else: + interpolated_coords.append(p1.tolist()) + else: + # If the target_length is at or beyond the end of the path, add the last coordinate + interpolated_coords.append(coordinates[-1]) + + # Convert back to string format if necessary + interpolated_coords_str = "[" + ", ".join([f"{{'x': {round(coord[0])}, 'y': {round(coord[1])}}}" for coord in interpolated_coords]) + "]" + print(interpolated_coords_str) + + return (interpolated_coords_str,) + +class DrawInstanceDiffusionTracking: + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image", ) + FUNCTION = "draw" + CATEGORY = "KJNodes/InstanceDiffusion" + DESCRIPTION = """ +Draws the tracking data from +CreateInstanceDiffusionTracking -node. 
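# A minimal sketch of the arc-length interpolation used by InterpolateCoords
# above: positions on the normalized curve are mapped to distances along the
# polyline, then interpolated inside the matching segment. np.interp over the
# cumulative segment lengths is an equivalent, more compact formulation.
import numpy as np

points = np.array([(0, 0), (100, 0), (100, 50)], dtype=float)
seg_len = np.linalg.norm(np.diff(points, axis=0), axis=1)
cum = np.concatenate([[0.0], np.cumsum(seg_len)])   # cumulative arc length
curve = np.linspace(0, 1, 5)                        # normalized positions
targets = curve * cum[-1]
xs = np.interp(targets, cum, points[:, 0])
ys = np.interp(targets, cum, points[:, 1])
print(list(zip(xs.round(1), ys.round(1))))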
+ +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "tracking": ("TRACKING", {"forceInput": True}), + "box_line_width": ("INT", {"default": 2, "min": 1, "max": 10, "step": 1}), + "draw_text": ("BOOLEAN", {"default": True}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "font_size": ("INT", {"default": 20}), + }, + } + + def draw(self, image, tracking, box_line_width, draw_text, font, font_size): + import matplotlib.cm as cm + + modified_images = [] + + colormap = cm.get_cmap('rainbow', len(tracking)) + if draw_text: + font_path = folder_paths.get_full_path("kjnodes_fonts", font) + font = ImageFont.truetype(font_path, font_size) + + # Iterate over each image in the batch + for i in range(image.shape[0]): + # Extract the current image and convert it to a PIL image + current_image = image[i, :, :, :].permute(2, 0, 1) + pil_image = transforms.ToPILImage()(current_image) + + draw = ImageDraw.Draw(pil_image) + + # Iterate over the bounding boxes for the current image + for j, (class_name, class_data) in enumerate(tracking.items()): + for class_id, bbox_list in class_data.items(): + # Check if the current index is within the bounds of the bbox_list + if i < len(bbox_list): + bbox = bbox_list[i] + # Ensure bbox is a list or tuple before unpacking + if isinstance(bbox, (list, tuple)): + x1, y1, x2, y2, _, _ = bbox + # Convert coordinates to integers + x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) + # Generate a color from the rainbow colormap + color = tuple(int(255 * x) for x in colormap(j / len(tracking)))[:3] + # Draw the bounding box on the image with the generated color + draw.rectangle([x1, y1, x2, y2], outline=color, width=box_line_width) + if draw_text: + # Draw the class name and ID as text above the box with the generated color + text = f"{class_id}.{class_name}" + # Calculate the width and height of the text + _, _, text_width, text_height = draw.textbbox((0, 0), text=text, font=font) + # Position the text above the top-left corner of the box + text_position = (x1, y1 - text_height) + draw.text(text_position, text, fill=color, font=font) + else: + print(f"Unexpected data type for bbox: {type(bbox)}") + + # Convert the drawn image back to a torch tensor and adjust back to (H, W, C) + modified_image_tensor = transforms.ToTensor()(pil_image).permute(1, 2, 0) + modified_images.append(modified_image_tensor) + + # Stack the modified images back into a batch + image_tensor_batch = torch.stack(modified_images).cpu().float() + + return image_tensor_batch, + +class PointsEditor: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "points_store": ("STRING", {"multiline": False}), + "coordinates": ("STRING", {"multiline": False}), + "neg_coordinates": ("STRING", {"multiline": False}), + "bbox_store": ("STRING", {"multiline": False}), + "bboxes": ("STRING", {"multiline": False}), + "bbox_format": ( + [ + 'xyxy', + 'xywh', + ], + ), + "width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "normalize": ("BOOLEAN", {"default": False}), + }, + "optional": { + "bg_image": ("IMAGE", ), + }, + } + + RETURN_TYPES = ("STRING", "STRING", "BBOX", "MASK", "IMAGE") + RETURN_NAMES = ("positive_coords", "negative_coords", "bbox", "bbox_mask", "cropped_image") + FUNCTION = "pointdata" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +# WORK IN PROGRESS +Do not count on this as part of your workflow yet, +probably contains lots of bugs 
and stability is not +guaranteed!! + +## Graphical editor to create coordinates + +**Shift + click** to add a positive (green) point. +**Shift + right click** to add a negative (red) point. +**Ctrl + click** to draw a box. +**Right click on a point** to delete it. +Note that you can't delete from start/end of the points array. + +To add an image select the node and copy/paste or drag in the image. +Or from the bg_image input on queue (first frame of the batch). + +**THE IMAGE IS SAVED TO THE NODE AND WORKFLOW METADATA** +you can clear the image from the context menu by right clicking on the canvas + +""" + + def pointdata(self, points_store, bbox_store, width, height, coordinates, neg_coordinates, normalize, bboxes, bbox_format="xyxy", bg_image=None): + coordinates = json.loads(coordinates) + pos_coordinates = [] + for coord in coordinates: + coord['x'] = int(round(coord['x'])) + coord['y'] = int(round(coord['y'])) + if normalize: + norm_x = coord['x'] / width + norm_y = coord['y'] / height + pos_coordinates.append({'x': norm_x, 'y': norm_y}) + else: + pos_coordinates.append({'x': coord['x'], 'y': coord['y']}) + + if neg_coordinates: + coordinates = json.loads(neg_coordinates) + neg_coordinates = [] + for coord in coordinates: + coord['x'] = int(round(coord['x'])) + coord['y'] = int(round(coord['y'])) + if normalize: + norm_x = coord['x'] / width + norm_y = coord['y'] / height + neg_coordinates.append({'x': norm_x, 'y': norm_y}) + else: + neg_coordinates.append({'x': coord['x'], 'y': coord['y']}) + + # Create a blank mask + mask = np.zeros((height, width), dtype=np.uint8) + bboxes = json.loads(bboxes) + print(bboxes) + valid_bboxes = [] + for bbox in bboxes: + if (bbox.get("startX") is None or + bbox.get("startY") is None or + bbox.get("endX") is None or + bbox.get("endY") is None): + continue # Skip this bounding box if any value is None + else: + # Ensure that endX and endY are greater than startX and startY + x_min = min(int(bbox["startX"]), int(bbox["endX"])) + y_min = min(int(bbox["startY"]), int(bbox["endY"])) + x_max = max(int(bbox["startX"]), int(bbox["endX"])) + y_max = max(int(bbox["startY"]), int(bbox["endY"])) + + valid_bboxes.append((x_min, y_min, x_max, y_max)) + + bboxes_xyxy = [] + for bbox in valid_bboxes: + x_min, y_min, x_max, y_max = bbox + bboxes_xyxy.append((x_min, y_min, x_max, y_max)) + mask[y_min:y_max, x_min:x_max] = 1 # Fill the bounding box area with 1s + + if bbox_format == "xywh": + bboxes_xywh = [] + for bbox in valid_bboxes: + x_min, y_min, x_max, y_max = bbox + width = x_max - x_min + height = y_max - y_min + bboxes_xywh.append((x_min, y_min, width, height)) + bboxes = bboxes_xywh + else: + bboxes = bboxes_xyxy + + mask_tensor = torch.from_numpy(mask) + mask_tensor = mask_tensor.unsqueeze(0).float().cpu() + + if bg_image is not None and len(valid_bboxes) > 0: + x_min, y_min, x_max, y_max = bboxes[0] + cropped_image = bg_image[:, y_min:y_max, x_min:x_max, :] + + elif bg_image is not None: + cropped_image = bg_image + + if bg_image is None: + return (json.dumps(pos_coordinates), json.dumps(neg_coordinates), bboxes, mask_tensor) + else: + transform = transforms.ToPILImage() + image = transform(bg_image[0].permute(2, 0, 1)) + buffered = io.BytesIO() + image.save(buffered, format="JPEG", quality=75) + + # Step 3: Encode the image bytes to a Base64 string + img_bytes = buffered.getvalue() + img_base64 = base64.b64encode(img_bytes).decode('utf-8') + + return { + "ui": {"bg_image": [img_base64]}, + "result": (json.dumps(pos_coordinates), 
json.dumps(neg_coordinates), bboxes, mask_tensor, cropped_image) + } + +class CutAndDragOnPath: + RETURN_TYPES = ("IMAGE", "MASK",) + RETURN_NAMES = ("image","mask", ) + FUNCTION = "cutanddrag" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Cuts the masked area from the image, and drags it along the path. If inpaint is enabled, and no bg_image is provided, the cut area is filled using cv2 TELEA algorithm. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "coordinates": ("STRING", {"forceInput": True}), + "mask": ("MASK",), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "inpaint": ("BOOLEAN", {"default": True}), + }, + "optional": { + "bg_image": ("IMAGE",), + } + } + + def cutanddrag(self, image, coordinates, mask, frame_width, frame_height, inpaint, bg_image=None): + # Parse coordinates + coords_list = parse_json_tracks(coordinates) + + batch_size = len(coords_list[0]) + images_list = [] + masks_list = [] + + # Convert input image and mask to PIL + input_image = tensor2pil(image)[0] + input_mask = tensor2pil(mask)[0] + + # Find masked region bounds + mask_array = np.array(input_mask) + y_indices, x_indices = np.where(mask_array > 0) + if len(x_indices) == 0 or len(y_indices) == 0: + return (image, mask) + + x_min, x_max = x_indices.min(), x_indices.max() + y_min, y_max = y_indices.min(), y_indices.max() + + # Cut out the masked region + cut_width = x_max - x_min + cut_height = y_max - y_min + cut_image = input_image.crop((x_min, y_min, x_max, y_max)) + cut_mask = input_mask.crop((x_min, y_min, x_max, y_max)) + + # Create inpainted background + if bg_image is None: + background = input_image.copy() + # Inpaint the cut area + if inpaint: + import cv2 + border = 5 # Create small border around cut area for better inpainting + fill_mask = Image.new("L", background.size, 0) + draw = ImageDraw.Draw(fill_mask) + draw.rectangle([x_min-border, y_min-border, x_max+border, y_max+border], fill=255) + background = cv2.inpaint( + np.array(background), + np.array(fill_mask), + inpaintRadius=3, + flags=cv2.INPAINT_TELEA + ) + background = Image.fromarray(background) + else: + background = tensor2pil(bg_image)[0] + + # Create batch of images with cut region at different positions + for i in range(batch_size): + # Create new image + new_image = background.copy() + new_mask = Image.new("L", (frame_width, frame_height), 0) + + # Get target position from coordinates + for coords in coords_list: + target_x = int(coords[i]['x'] - cut_width/2) + target_y = int(coords[i]['y'] - cut_height/2) + + # Paste cut region at new position + new_image.paste(cut_image, (target_x, target_y), cut_mask) + new_mask.paste(cut_mask, (target_x, target_y)) + + # Convert to tensor and append + image_tensor = pil2tensor(new_image) + mask_tensor = pil2tensor(new_mask) + + images_list.append(image_tensor) + masks_list.append(mask_tensor) + + # Stack tensors into batches + out_images = torch.cat(images_list, dim=0).cpu().float() + out_masks = torch.cat(masks_list, dim=0) + + return (out_images, out_masks) \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/nodes/image_nodes.py b/custom_nodes/comfyui-kjnodes/nodes/image_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..7cb9cae1189534ed1d35bd4d018c85a28a5f29d1 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/nodes/image_nodes.py @@ -0,0 +1,3797 @@ +import numpy as np +import time 
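# A minimal sketch of the TELEA fill used by CutAndDragOnPath above: a white
# rectangle in an 8-bit mask marks the cut-out area, and cv2.inpaint fills it
# from the surrounding pixels. The gradient image is only a stand-in.
import numpy as np
import cv2

img = np.tile(np.linspace(0, 255, 256, dtype=np.uint8), (256, 1))
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
fill_mask = np.zeros((256, 256), dtype=np.uint8)
fill_mask[96:160, 96:160] = 255                     # region to fill
filled = cv2.inpaint(img, fill_mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)
print(filled.shape, filled.dtype)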
+import torch +import torch.nn.functional as F +import torchvision.transforms as T +import io +import base64 +import random +import math +import os +import re +import json +import importlib +from PIL.PngImagePlugin import PngInfo +try: + import cv2 +except: + print("OpenCV not installed") + pass +from PIL import ImageGrab, ImageDraw, ImageFont, Image, ImageOps + +from nodes import MAX_RESOLUTION, SaveImage +from comfy_extras.nodes_mask import ImageCompositeMasked +from comfy.cli_args import args +from comfy.utils import ProgressBar, common_upscale +import folder_paths +from comfy import model_management +try: + from server import PromptServer +except: + PromptServer = None +from concurrent.futures import ThreadPoolExecutor + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +class ImagePass: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + }, + "optional": { + "image": ("IMAGE",), + }, + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "passthrough" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Passes the image through without modifying it. +""" + + def passthrough(self, image=None): + return image, + +class ColorMatch: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image_ref": ("IMAGE",), + "image_target": ("IMAGE",), + "method": ( + [ + 'mkl', + 'hm', + 'reinhard', + 'mvgd', + 'hm-mvgd-hm', + 'hm-mkl-hm', + ], { + "default": 'mkl' + }), + }, + "optional": { + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "multithread": ("BOOLEAN", {"default": True}), + } + } + + CATEGORY = "KJNodes/image" + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "colormatch" + DESCRIPTION = """ +color-matcher enables color transfer across images which comes in handy for automatic +color-grading of photographs, paintings and film sequences as well as light-field +and stopmotion corrections. + +The methods behind the mappings are based on the approach from Reinhard et al., +the Monge-Kantorovich Linearization (MKL) as proposed by Pitie et al. and our analytical solution +to a Multi-Variate Gaussian Distribution (MVGD) transfer in conjunction with classical histogram +matching. As shown below our HM-MVGD-HM compound outperforms existing methods. +https://github.com/hahnec/color-matcher/ + +""" + + def colormatch(self, image_ref, image_target, method, strength=1.0, multithread=True): + try: + from color_matcher import ColorMatcher + except: + raise Exception("Can't import color-matcher, did you install requirements.txt? 
Manual install: pip install color-matcher") + + image_ref = image_ref.cpu() + image_target = image_target.cpu() + batch_size = image_target.size(0) + + images_target = image_target.squeeze() + images_ref = image_ref.squeeze() + + image_ref_np = images_ref.numpy() + images_target_np = images_target.numpy() + + def process(i): + cm = ColorMatcher() + image_target_np_i = images_target_np if batch_size == 1 else images_target[i].numpy() + image_ref_np_i = image_ref_np if image_ref.size(0) == 1 else images_ref[i].numpy() + try: + image_result = cm.transfer(src=image_target_np_i, ref=image_ref_np_i, method=method) + image_result = image_target_np_i + strength * (image_result - image_target_np_i) + return torch.from_numpy(image_result) + except Exception as e: + print(f"Thread {i} error: {e}") + return torch.from_numpy(image_target_np_i) # fallback + + if multithread and batch_size > 1: + max_threads = min(os.cpu_count() or 1, batch_size) + with ThreadPoolExecutor(max_workers=max_threads) as executor: + out = list(executor.map(process, range(batch_size))) + else: + out = [process(i) for i in range(batch_size)] + + out = torch.stack(out, dim=0).to(torch.float32) + out.clamp_(0, 1) + return (out,) + +class SaveImageWithAlpha: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), + "mask": ("MASK", ), + "filename_prefix": ("STRING", {"default": "ComfyUI"})}, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + RETURN_TYPES = () + FUNCTION = "save_images_alpha" + OUTPUT_NODE = True + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Saves an image and mask as .PNG with the mask as the alpha channel. +""" + + def save_images_alpha(self, images, mask, filename_prefix="ComfyUI_image_with_alpha", prompt=None, extra_pnginfo=None): + from PIL.PngImagePlugin import PngInfo + filename_prefix += self.prefix_append + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) + results = list() + if mask.dtype == torch.float16: + mask = mask.to(torch.float32) + def file_counter(): + max_counter = 0 + # Loop through the existing files + for existing_file in os.listdir(full_output_folder): + # Check if the file matches the expected format + match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file) + if match: + # Extract the numeric portion of the filename + file_counter = int(match.group(1)) + # Update the maximum counter value if necessary + if file_counter > max_counter: + max_counter = file_counter + return max_counter + + for image, alpha in zip(images, mask): + i = 255. * image.cpu().numpy() + a = 255. 
* alpha.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + + # Resize the mask to match the image size + a_resized = Image.fromarray(a).resize(img.size, Image.LANCZOS) + a_resized = np.clip(a_resized, 0, 255).astype(np.uint8) + img.putalpha(Image.fromarray(a_resized, mode='L')) + metadata = None + if not args.disable_metadata: + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + # Increment the counter by 1 to get the next available value + counter = file_counter() + 1 + file = f"{filename}_{counter:05}.png" + img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + return { "ui": { "images": results } } + +class ImageConcanate: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "direction": ( + [ 'right', + 'down', + 'left', + 'up', + ], + { + "default": 'right' + }), + "match_image_size": ("BOOLEAN", {"default": True}), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "concatenate" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the image2 to image1 in the specified direction. +""" + + def concatenate(self, image1, image2, direction, match_image_size, first_image_shape=None): + # Check if the batch sizes are different + batch_size1 = image1.shape[0] + batch_size2 = image2.shape[0] + + if batch_size1 != batch_size2: + # Calculate the number of repetitions needed + max_batch_size = max(batch_size1, batch_size2) + repeats1 = max_batch_size - batch_size1 + repeats2 = max_batch_size - batch_size2 + + # Repeat the last image to match the largest batch size + if repeats1 > 0: + last_image1 = image1[-1].unsqueeze(0).repeat(repeats1, 1, 1, 1) + image1 = torch.cat([image1.clone(), last_image1], dim=0) + if repeats2 > 0: + last_image2 = image2[-1].unsqueeze(0).repeat(repeats2, 1, 1, 1) + image2 = torch.cat([image2.clone(), last_image2], dim=0) + + if match_image_size: + # Use first_image_shape if provided; otherwise, default to image1's shape + target_shape = first_image_shape if first_image_shape is not None else image1.shape + + original_height = image2.shape[1] + original_width = image2.shape[2] + original_aspect_ratio = original_width / original_height + + if direction in ['left', 'right']: + # Match the height and adjust the width to preserve aspect ratio + target_height = target_shape[1] # B, H, W, C format + target_width = int(target_height * original_aspect_ratio) + elif direction in ['up', 'down']: + # Match the width and adjust the height to preserve aspect ratio + target_width = target_shape[2] # B, H, W, C format + target_height = int(target_width / original_aspect_ratio) + + # Adjust image2 to the expected format for common_upscale + image2_for_upscale = image2.movedim(-1, 1) # Move C to the second position (B, C, H, W) + + # Resize image2 to match the target size while preserving aspect ratio + image2_resized = common_upscale(image2_for_upscale, target_width, target_height, "lanczos", "disabled") + + # Adjust image2 back to the original format (B, H, W, C) after resizing + image2_resized = image2_resized.movedim(1, -1) + else: + image2_resized = image2 + + # Ensure both images have the same number of channels + channels_image1 = image1.shape[-1] + channels_image2 = image2_resized.shape[-1] + + if 
channels_image1 != channels_image2: + if channels_image1 < channels_image2: + # Add alpha channel to image1 if image2 has it + alpha_channel = torch.ones((*image1.shape[:-1], channels_image2 - channels_image1), device=image1.device) + image1 = torch.cat((image1, alpha_channel), dim=-1) + else: + # Add alpha channel to image2 if image1 has it + alpha_channel = torch.ones((*image2_resized.shape[:-1], channels_image1 - channels_image2), device=image2_resized.device) + image2_resized = torch.cat((image2_resized, alpha_channel), dim=-1) + + + # Concatenate based on the specified direction + if direction == 'right': + concatenated_image = torch.cat((image1, image2_resized), dim=2) # Concatenate along width + elif direction == 'down': + concatenated_image = torch.cat((image1, image2_resized), dim=1) # Concatenate along height + elif direction == 'left': + concatenated_image = torch.cat((image2_resized, image1), dim=2) # Concatenate along width + elif direction == 'up': + concatenated_image = torch.cat((image2_resized, image1), dim=1) # Concatenate along height + return concatenated_image, + +import torch # Make sure you have PyTorch installed + +class ImageConcatFromBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE",), + "num_columns": ("INT", {"default": 3, "min": 1, "max": 255, "step": 1}), + "match_image_size": ("BOOLEAN", {"default": False}), + "max_resolution": ("INT", {"default": 4096}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "concat" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ + Concatenates images from a batch into a grid with a specified number of columns. + """ + + def concat(self, images, num_columns, match_image_size, max_resolution): + # Assuming images is a batch of images (B, H, W, C) + batch_size, height, width, channels = images.shape + num_rows = (batch_size + num_columns - 1) // num_columns # Calculate number of rows + + print(f"Initial dimensions: batch_size={batch_size}, height={height}, width={width}, channels={channels}") + print(f"num_rows={num_rows}, num_columns={num_columns}") + + if match_image_size: + target_shape = images[0].shape + + resized_images = [] + for image in images: + original_height = image.shape[0] + original_width = image.shape[1] + original_aspect_ratio = original_width / original_height + + if original_aspect_ratio > 1: + target_height = target_shape[0] + target_width = int(target_height * original_aspect_ratio) + else: + target_width = target_shape[1] + target_height = int(target_width / original_aspect_ratio) + + print(f"Resizing image from ({original_height}, {original_width}) to ({target_height}, {target_width})") + + # Resize the image to match the target size while preserving aspect ratio + resized_image = common_upscale(image.movedim(-1, 0), target_width, target_height, "lanczos", "disabled") + resized_image = resized_image.movedim(0, -1) # Move channels back to the last dimension + resized_images.append(resized_image) + + # Convert the list of resized images back to a tensor + images = torch.stack(resized_images) + + height, width = target_shape[:2] # Update height and width + + # Initialize an empty grid + grid_height = num_rows * height + grid_width = num_columns * width + + print(f"Grid dimensions before scaling: grid_height={grid_height}, grid_width={grid_width}") + + # Original scale factor calculation remains unchanged + scale_factor = min(max_resolution / grid_height, max_resolution / grid_width, 1.0) + + # Apply scale factor to height and width + scaled_height = height * 
scale_factor + scaled_width = width * scale_factor + + # Round scaled dimensions to the nearest number divisible by 8 + height = max(1, int(round(scaled_height / 8) * 8)) + width = max(1, int(round(scaled_width / 8) * 8)) + + if abs(scaled_height - height) > 4: + height = max(1, int(round((scaled_height + 4) / 8) * 8)) + if abs(scaled_width - width) > 4: + width = max(1, int(round((scaled_width + 4) / 8) * 8)) + + # Recalculate grid dimensions with adjusted height and width + grid_height = num_rows * height + grid_width = num_columns * width + print(f"Grid dimensions after scaling: grid_height={grid_height}, grid_width={grid_width}") + print(f"Final image dimensions: height={height}, width={width}") + + grid = torch.zeros((grid_height, grid_width, channels), dtype=images.dtype) + + for idx, image in enumerate(images): + resized_image = torch.nn.functional.interpolate(image.unsqueeze(0).permute(0, 3, 1, 2), size=(height, width), mode="bilinear").squeeze().permute(1, 2, 0) + row = idx // num_columns + col = idx % num_columns + grid[row*height:(row+1)*height, col*width:(col+1)*width, :] = resized_image + + return grid.unsqueeze(0), + + +class ImageGridComposite2x2: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "image3": ("IMAGE",), + "image4": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "compositegrid" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the 4 input images into a 2x2 grid. +""" + + def compositegrid(self, image1, image2, image3, image4): + top_row = torch.cat((image1, image2), dim=2) + bottom_row = torch.cat((image3, image4), dim=2) + grid = torch.cat((top_row, bottom_row), dim=1) + return (grid,) + +class ImageGridComposite3x3: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "image3": ("IMAGE",), + "image4": ("IMAGE",), + "image5": ("IMAGE",), + "image6": ("IMAGE",), + "image7": ("IMAGE",), + "image8": ("IMAGE",), + "image9": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "compositegrid" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the 9 input images into a 3x3 grid. 
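# A minimal sketch of the grid compositing done by these nodes: ComfyUI IMAGE
# tensors are [B, H, W, C], so images in a row are concatenated along dim=2
# (width) and the rows are stacked along dim=1 (height). Random tensors stand
# in for real images.
import torch

imgs = [torch.rand(1, 64, 64, 3) for _ in range(4)]
top = torch.cat((imgs[0], imgs[1]), dim=2)
bottom = torch.cat((imgs[2], imgs[3]), dim=2)
grid = torch.cat((top, bottom), dim=1)
print(grid.shape)  # torch.Size([1, 128, 128, 3])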
+""" + + def compositegrid(self, image1, image2, image3, image4, image5, image6, image7, image8, image9): + top_row = torch.cat((image1, image2, image3), dim=2) + mid_row = torch.cat((image4, image5, image6), dim=2) + bottom_row = torch.cat((image7, image8, image9), dim=2) + grid = torch.cat((top_row, mid_row, bottom_row), dim=1) + return (grid,) + +class ImageBatchTestPattern: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "batch_size": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + "start_from": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "text_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "text_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "font_size": ("INT", {"default": 255,"min": 8, "max": 4096, "step": 1}), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "generatetestpattern" + CATEGORY = "KJNodes/text" + + def generatetestpattern(self, batch_size, font, font_size, start_from, width, height, text_x, text_y): + out = [] + # Generate the sequential numbers for each image + numbers = np.arange(start_from, start_from + batch_size) + font_path = folder_paths.get_full_path("kjnodes_fonts", font) + + for number in numbers: + # Create a black image with the number as a random color text + image = Image.new("RGB", (width, height), color='black') + draw = ImageDraw.Draw(image) + + # Generate a random color for the text + font_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) + + font = ImageFont.truetype(font_path, font_size) + + # Get the size of the text and position it in the center + text = str(number) + + try: + draw.text((text_x, text_y), text, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, text_y), text, font=font, fill=font_color,) + + # Convert the image to a numpy array and normalize the pixel values + image_np = np.array(image).astype(np.float32) / 255.0 + image_tensor = torch.from_numpy(image_np).unsqueeze(0) + out.append(image_tensor) + out_tensor = torch.cat(out, dim=0) + + return (out_tensor,) + +class ImageGrabPIL: + + @classmethod + def IS_CHANGED(cls): + + return + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "screencap" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Captures an area specified by screen coordinates. +Can be used for realtime diffusion with autoqueue. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "width": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + "delay": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.01}), + }, + } + + def screencap(self, x, y, width, height, num_frames, delay): + start_time = time.time() + captures = [] + bbox = (x, y, x + width, y + height) + + for _ in range(num_frames): + # Capture screen + screen_capture = ImageGrab.grab(bbox=bbox) + screen_capture_torch = torch.from_numpy(np.array(screen_capture, dtype=np.float32) / 255.0).unsqueeze(0) + captures.append(screen_capture_torch) + + # Wait for a short delay if more than one frame is to be captured + if num_frames > 1: + time.sleep(delay) + + elapsed_time = time.time() - start_time + print(f"screengrab took {elapsed_time} seconds.") + + return (torch.cat(captures, dim=0),) + +class Screencap_mss: + + @classmethod + def IS_CHANGED(s, **kwargs): + return float("NaN") + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "screencap" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Captures an area specified by screen coordinates. +Can be used for realtime diffusion with autoqueue. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "x": ("INT", {"default": 0,"min": 0, "max": 10000, "step": 1}), + "y": ("INT", {"default": 0,"min": 0, "max": 10000, "step": 1}), + "width": ("INT", {"default": 512,"min": 0, "max": 10000, "step": 1}), + "height": ("INT", {"default": 512,"min": 0, "max": 10000, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + "delay": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.01}), + }, + } + + def screencap(self, x, y, width, height, num_frames, delay): + from mss import mss + captures = [] + with mss() as sct: + bbox = {'top': y, 'left': x, 'width': width, 'height': height} + + for _ in range(num_frames): + sct_img = sct.grab(bbox) + img_np = np.array(sct_img) + img_torch = torch.from_numpy(img_np[..., [2, 1, 0]]).float() / 255.0 + captures.append(img_torch) + + if num_frames > 1: + time.sleep(delay) + + return (torch.stack(captures, 0),) + +class WebcamCaptureCV2: + + @classmethod + def IS_CHANGED(cls): + return + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "capture" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Captures a frame from a webcam using CV2. +Can be used for realtime diffusion with autoqueue. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "width": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "cam_index": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "release": ("BOOLEAN", {"default": False}), + }, + } + + def capture(self, x, y, cam_index, width, height, release): + # Check if the camera index has changed or the capture object doesn't exist + if not hasattr(self, "cap") or self.cap is None or self.current_cam_index != cam_index: + if hasattr(self, "cap") and self.cap is not None: + self.cap.release() + self.current_cam_index = cam_index + self.cap = cv2.VideoCapture(cam_index) + try: + self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width) + self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height) + except: + pass + if not self.cap.isOpened(): + raise Exception("Could not open webcam") + + ret, frame = self.cap.read() + if not ret: + raise Exception("Failed to capture image from webcam") + + # Crop the frame to the specified bbox + frame = frame[y:y+height, x:x+width] + img_torch = torch.from_numpy(frame[..., [2, 1, 0]]).float() / 255.0 + + if release: + self.cap.release() + self.cap = None + + return (img_torch.unsqueeze(0),) + +class AddLabel: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image":("IMAGE",), + "text_x": ("INT", {"default": 10, "min": 0, "max": 4096, "step": 1}), + "text_y": ("INT", {"default": 2, "min": 0, "max": 4096, "step": 1}), + "height": ("INT", {"default": 48, "min": -1, "max": 4096, "step": 1}), + "font_size": ("INT", {"default": 32, "min": 0, "max": 4096, "step": 1}), + "font_color": ("STRING", {"default": "white"}), + "label_color": ("STRING", {"default": "black"}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "text": ("STRING", {"default": "Text"}), + "direction": ( + [ 'up', + 'down', + 'left', + 'right', + 'overlay' + ], + { + "default": 'up' + }), + }, + "optional":{ + "caption": ("STRING", {"default": "", "forceInput": True}), + } + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "addlabel" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Creates a new with the given text, and concatenates it to +either above or below the input image. +Note that this changes the input image's height! 
+Fonts are loaded from this folder: +ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts +""" + + def addlabel(self, image, text_x, text_y, text, height, font_size, font_color, label_color, font, direction, caption=""): + batch_size = image.shape[0] + width = image.shape[2] + + font_path = os.path.join(script_directory, "fonts", "TTNorms-Black.otf") if font == "TTNorms-Black.otf" else folder_paths.get_full_path("kjnodes_fonts", font) + + def process_image(input_image, caption_text): + font = ImageFont.truetype(font_path, font_size) + words = caption_text.split() + lines = [] + current_line = [] + current_line_width = 0 + + for word in words: + word_width = font.getbbox(word)[2] + if current_line_width + word_width <= width - 2 * text_x: + current_line.append(word) + current_line_width += word_width + font.getbbox(" ")[2] # Add space width + else: + lines.append(" ".join(current_line)) + current_line = [word] + current_line_width = word_width + + if current_line: + lines.append(" ".join(current_line)) + + if direction == 'overlay': + pil_image = Image.fromarray((input_image.cpu().numpy() * 255).astype(np.uint8)) + else: + if height == -1: + # Adjust the image height automatically + margin = 8 + required_height = (text_y + len(lines) * font_size) + margin # Calculate required height + pil_image = Image.new("RGB", (width, required_height), label_color) + else: + # Initialize with a minimal height + label_image = Image.new("RGB", (width, height), label_color) + pil_image = label_image + + draw = ImageDraw.Draw(pil_image) + + + y_offset = text_y + for line in lines: + try: + draw.text((text_x, y_offset), line, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, y_offset), line, font=font, fill=font_color) + y_offset += font_size + + processed_image = torch.from_numpy(np.array(pil_image).astype(np.float32) / 255.0).unsqueeze(0) + return processed_image + + if caption == "": + processed_images = [process_image(img, text) for img in image] + else: + assert len(caption) == batch_size, f"Number of captions {(len(caption))} does not match number of images" + processed_images = [process_image(img, cap) for img, cap in zip(image, caption)] + processed_batch = torch.cat(processed_images, dim=0) + + # Combine images based on direction + if direction == 'down': + combined_images = torch.cat((image, processed_batch), dim=1) + elif direction == 'up': + combined_images = torch.cat((processed_batch, image), dim=1) + elif direction == 'left': + processed_batch = torch.rot90(processed_batch, 3, (2, 3)).permute(0, 3, 1, 2) + combined_images = torch.cat((processed_batch, image), dim=2) + elif direction == 'right': + processed_batch = torch.rot90(processed_batch, 3, (2, 3)).permute(0, 3, 1, 2) + combined_images = torch.cat((image, processed_batch), dim=2) + else: + combined_images = processed_batch + + return (combined_images,) + +class GetImageSizeAndCount: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE","INT", "INT", "INT",) + RETURN_NAMES = ("image", "width", "height", "count",) + FUNCTION = "getsize" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Returns width, height and batch size of the image, +and passes it through unchanged. 
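# A minimal sketch of the shape convention this node reports: ComfyUI IMAGE
# tensors are [batch, height, width, channels], so count, height and width
# come straight from .shape.
import torch

image = torch.rand(8, 480, 640, 3)
count, height, width = image.shape[0], image.shape[1], image.shape[2]
print(f"{count}x{width}x{height}")  # matches the node's UI text, e.g. 8x640x480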
+ +""" + + def getsize(self, image): + width = image.shape[2] + height = image.shape[1] + count = image.shape[0] + return {"ui": { + "text": [f"{count}x{width}x{height}"]}, + "result": (image, width, height, count) + } + +class GetLatentSizeAndCount: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "latent": ("LATENT",), + }} + + RETURN_TYPES = ("LATENT","INT", "INT", "INT", "INT", "INT") + RETURN_NAMES = ("latent", "batch_size", "channels", "frames", "width", "height") + FUNCTION = "getsize" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Returns latent tensor dimensions, +and passes the latent through unchanged. + +""" + def getsize(self, latent): + if len(latent["samples"].shape) == 5: + B, C, T, H, W = latent["samples"].shape + elif len(latent["samples"].shape) == 4: + B, C, H, W = latent["samples"].shape + T = 0 + else: + raise ValueError("Invalid latent shape") + + return {"ui": { + "text": [f"{B}x{C}x{T}x{H}x{W}"]}, + "result": (latent, B, C, T, H, W) + } + +class ImageBatchRepeatInterleaving: + + RETURN_TYPES = ("IMAGE", "MASK",) + FUNCTION = "repeat" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Repeats each image in a batch by the specified number of times. +Example batch of 5 images: 0, 1 ,2, 3, 4 +with repeats 2 becomes batch of 10 images: 0, 0, 1, 1, 2, 2, 3, 3, 4, 4 +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "repeats": ("INT", {"default": 1, "min": 1, "max": 4096}), + }, + "optional": { + "mask": ("MASK",), + } + } + + def repeat(self, images, repeats, mask=None): + original_count = images.shape[0] + total_count = original_count * repeats + + repeated_images = torch.repeat_interleave(images, repeats=repeats, dim=0) + if mask is not None: + mask = torch.repeat_interleave(mask, repeats=repeats, dim=0) + else: + mask = torch.zeros((total_count, images.shape[1], images.shape[2]), + device=images.device, dtype=images.dtype) + for i in range(original_count): + mask[i * repeats] = 1.0 + + print("mask shape", mask.shape) + return (repeated_images, mask) + +class ImageUpscaleWithModelBatched: + @classmethod + def INPUT_TYPES(s): + return {"required": { "upscale_model": ("UPSCALE_MODEL",), + "images": ("IMAGE",), + "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "upscale" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Same as ComfyUI native model upscaling node, +but allows setting sub-batches for reduced VRAM usage. 
+""" + def upscale(self, upscale_model, images, per_batch): + + device = model_management.get_torch_device() + upscale_model.to(device) + in_img = images.movedim(-1,-3) + + steps = in_img.shape[0] + pbar = ProgressBar(steps) + t = [] + + for start_idx in range(0, in_img.shape[0], per_batch): + sub_images = upscale_model(in_img[start_idx:start_idx+per_batch].to(device)) + t.append(sub_images.cpu()) + # Calculate the number of images processed in this batch + batch_count = sub_images.shape[0] + # Update the progress bar by the number of images processed in this batch + pbar.update(batch_count) + upscale_model.cpu() + + t = torch.cat(t, dim=0).permute(0, 2, 3, 1).cpu() + + return (t,) + +class ImageNormalize_Neg1_To_1: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE",), + + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "normalize" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Normalize the images to be in the range [-1, 1] +""" + + def normalize(self,images): + images = images * 2.0 - 1.0 + return (images,) + +class RemapImageRange: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}), + "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}), + "clamp": ("BOOLEAN", {"default": True}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "remap" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Remaps the image values to the specified range. +""" + + def remap(self, image, min, max, clamp): + if image.dtype == torch.float16: + image = image.to(torch.float32) + image = min + image * (max - min) + if clamp: + image = torch.clamp(image, min=0.0, max=1.0) + return (image, ) + +class SplitImageChannels: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + }, + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK") + RETURN_NAMES = ("red", "green", "blue", "mask") + FUNCTION = "split" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Splits image channels into images where the selected channel +is repeated for all channels, and the alpha as a mask. +""" + + def split(self, image): + red = image[:, :, :, 0:1] # Red channel + green = image[:, :, :, 1:2] # Green channel + blue = image[:, :, :, 2:3] # Blue channel + alpha = image[:, :, :, 3:4] # Alpha channel + alpha = alpha.squeeze(-1) + + # Repeat the selected channel for all channels + red = torch.cat([red, red, red], dim=3) + green = torch.cat([green, green, green], dim=3) + blue = torch.cat([blue, blue, blue], dim=3) + return (red, green, blue, alpha) + +class MergeImageChannels: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "red": ("IMAGE",), + "green": ("IMAGE",), + "blue": ("IMAGE",), + + }, + "optional": { + "alpha": ("MASK", {"default": None}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "merge" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Merges channel data into an image. 
+""" + + def merge(self, red, green, blue, alpha=None): + image = torch.stack([ + red[..., 0, None], # Red channel + green[..., 1, None], # Green channel + blue[..., 2, None] # Blue channel + ], dim=-1) + image = image.squeeze(-2) + if alpha is not None: + image = torch.cat([image, alpha.unsqueeze(-1)], dim=-1) + return (image,) + +class ImagePadForOutpaintMasked: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "feathering": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + }, + "optional": { + "mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "expand_image" + + CATEGORY = "image" + + def expand_image(self, image, left, top, right, bottom, feathering, mask=None): + if mask is not None: + if torch.allclose(mask, torch.zeros_like(mask)): + print("Warning: The incoming mask is fully black. Handling it as None.") + mask = None + B, H, W, C = image.size() + + new_image = torch.ones( + (B, H + top + bottom, W + left + right, C), + dtype=torch.float32, + ) * 0.5 + + new_image[:, top:top + H, left:left + W, :] = image + + if mask is None: + new_mask = torch.ones( + (B, H + top + bottom, W + left + right), + dtype=torch.float32, + ) + + t = torch.zeros( + (B, H, W), + dtype=torch.float32 + ) + else: + # If a mask is provided, pad it to fit the new image size + mask = F.pad(mask, (left, right, top, bottom), mode='constant', value=0) + mask = 1 - mask + t = torch.zeros_like(mask) + + if feathering > 0 and feathering * 2 < H and feathering * 2 < W: + + for i in range(H): + for j in range(W): + dt = i if top != 0 else H + db = H - i if bottom != 0 else H + + dl = j if left != 0 else W + dr = W - j if right != 0 else W + + d = min(dt, db, dl, dr) + + if d >= feathering: + continue + + v = (feathering - d) / feathering + + if mask is None: + t[:, i, j] = v * v + else: + t[:, top + i, left + j] = v * v + + if mask is None: + new_mask[:, top:top + H, left:left + W] = t + return (new_image, new_mask,) + else: + return (new_image, mask,) + +class ImagePadForOutpaintTargetSize: + upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"] + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "target_width": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "target_height": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "feathering": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "upscale_method": (s.upscale_methods,), + }, + "optional": { + "mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "expand_image" + + CATEGORY = "image" + + def expand_image(self, image, target_width, target_height, feathering, upscale_method, mask=None): + B, H, W, C = image.size() + new_height = H + new_width = W + # Calculate the scaling factor while maintaining aspect ratio + scaling_factor = min(target_width / W, target_height / H) + + # Check if the image needs to be downscaled + if scaling_factor < 1: + image = image.movedim(-1,1) + # Calculate the new width and height after downscaling + new_width = int(W * scaling_factor) + new_height = int(H * scaling_factor) + + # Downscale the 
image + image_scaled = common_upscale(image, new_width, new_height, upscale_method, "disabled").movedim(1,-1) + if mask is not None: + mask_scaled = mask.unsqueeze(0) # Add an extra dimension for batch size + mask_scaled = F.interpolate(mask_scaled, size=(new_height, new_width), mode="nearest") + mask_scaled = mask_scaled.squeeze(0) # Remove the extra dimension after interpolation + else: + mask_scaled = mask + else: + # If downscaling is not needed, use the original image dimensions + image_scaled = image + mask_scaled = mask + + # Calculate how much padding is needed to reach the target dimensions + pad_top = max(0, (target_height - new_height) // 2) + pad_bottom = max(0, target_height - new_height - pad_top) + pad_left = max(0, (target_width - new_width) // 2) + pad_right = max(0, target_width - new_width - pad_left) + + # Now call the original expand_image with the calculated padding + return ImagePadForOutpaintMasked.expand_image(self, image_scaled, pad_left, pad_top, pad_right, pad_bottom, feathering, mask_scaled) + +class ImagePrepForICLora: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "reference_image": ("IMAGE",), + "output_width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), + "output_height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), + "border_width": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 1}), + }, + "optional": { + "latent_image": ("IMAGE",), + "latent_mask": ("MASK",), + "reference_mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "expand_image" + + CATEGORY = "image" + + def expand_image(self, reference_image, output_width, output_height, border_width, latent_image=None, reference_mask=None, latent_mask=None): + + if reference_mask is not None: + if torch.allclose(reference_mask, torch.zeros_like(reference_mask)): + print("Warning: The incoming mask is fully black. 
Handling it as None.") + reference_mask = None + image = reference_image + if latent_image is not None: + if image.shape[0] != latent_image.shape[0]: + image = image.repeat(latent_image.shape[0], 1, 1, 1) + B, H, W, C = image.size() + + # Handle mask + if reference_mask is not None: + resized_mask = torch.nn.functional.interpolate( + reference_mask.unsqueeze(1), + size=(H, W), + mode='nearest' + ).squeeze(1) + print(resized_mask.shape) + image = image * resized_mask.unsqueeze(-1) + + # Calculate new width maintaining aspect ratio + new_width = int((W / H) * output_height) + + # Resize image to new height while maintaining aspect ratio + resized_image = common_upscale(image.movedim(-1,1), new_width, output_height, "lanczos", "disabled").movedim(1,-1) + + # Create padded image + if latent_image is None: + pad_image = torch.zeros((B, output_height, output_width, C), device=image.device) + else: + resized_latent_image = common_upscale(latent_image.movedim(-1,1), output_width, output_height, "lanczos", "disabled").movedim(1,-1) + pad_image = resized_latent_image + if latent_mask is not None: + resized_latent_mask = torch.nn.functional.interpolate( + latent_mask.unsqueeze(1), + size=(pad_image.shape[1], pad_image.shape[2]), + mode='nearest' + ).squeeze(1) + + if border_width > 0: + border = torch.zeros((B, output_height, border_width, C), device=image.device) + padded_image = torch.cat((resized_image, border, pad_image), dim=2) + if latent_mask is not None: + padded_mask = torch.zeros((B, padded_image.shape[1], padded_image.shape[2]), device=image.device) + padded_mask[:, :, (new_width + border_width):] = resized_latent_mask + else: + padded_mask = torch.ones((B, padded_image.shape[1], padded_image.shape[2]), device=image.device) + padded_mask[:, :, :new_width + border_width] = 0 + else: + padded_image = torch.cat((resized_image, pad_image), dim=2) + if latent_mask is not None: + padded_mask = torch.zeros((B, padded_image.shape[1], padded_image.shape[2]), device=image.device) + padded_mask[:, :, new_width:] = resized_latent_mask + else: + padded_mask = torch.ones((B, padded_image.shape[1], padded_image.shape[2]), device=image.device) + padded_mask[:, :, :new_width] = 0 + + return (padded_image, padded_mask) + + +class ImageAndMaskPreview(SaveImage): + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + self.compress_level = 4 + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask_opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "mask_color": ("STRING", {"default": "255, 255, 255"}), + "pass_through": ("BOOLEAN", {"default": False}), + }, + "optional": { + "image": ("IMAGE",), + "mask": ("MASK",), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("composite",) + FUNCTION = "execute" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Preview an image or a mask, when both inputs are used +composites the mask on top of the image. +with pass_through on the preview is disabled and the +composite is returned from the composite slot instead, +this allows for the preview to be passed for video combine +nodes for example. 
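+mask_color accepts either comma separated RGB values such as "255, 255, 255"
+or a hex value such as #FF0000.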
+""" + + def execute(self, mask_opacity, mask_color, pass_through, filename_prefix="ComfyUI", image=None, mask=None, prompt=None, extra_pnginfo=None): + if mask is not None and image is None: + preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) + elif mask is None and image is not None: + preview = image + elif mask is not None and image is not None: + mask_adjusted = mask * mask_opacity + mask_image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3).clone() + + if ',' in mask_color: + color_list = np.clip([int(channel) for channel in mask_color.split(',')], 0, 255) # RGB format + else: + mask_color = mask_color.lstrip('#') + color_list = [int(mask_color[i:i+2], 16) for i in (0, 2, 4)] # Hex format + mask_image[:, :, :, 0] = color_list[0] / 255 # Red channel + mask_image[:, :, :, 1] = color_list[1] / 255 # Green channel + mask_image[:, :, :, 2] = color_list[2] / 255 # Blue channel + + preview, = ImageCompositeMasked.composite(self, image, mask_image, 0, 0, True, mask_adjusted) + if pass_through: + return (preview, ) + return(self.save_images(preview, filename_prefix, prompt, extra_pnginfo)) + +def crossfade(images_1, images_2, alpha): + crossfade = (1 - alpha) * images_1 + alpha * images_2 + return crossfade +def ease_in(t): + return t * t +def ease_out(t): + return 1 - (1 - t) * (1 - t) +def ease_in_out(t): + return 3 * t * t - 2 * t * t * t +def bounce(t): + if t < 0.5: + return ease_out(t * 2) * 0.5 + else: + return ease_in((t - 0.5) * 2) * 0.5 + 0.5 +def elastic(t): + return math.sin(13 * math.pi / 2 * t) * math.pow(2, 10 * (t - 1)) +def glitchy(t): + return t + 0.1 * math.sin(40 * t) +def exponential_ease_out(t): + return 1 - (1 - t) ** 4 + +easing_functions = { + "linear": lambda t: t, + "ease_in": ease_in, + "ease_out": ease_out, + "ease_in_out": ease_in_out, + "bounce": bounce, + "elastic": elastic, + "glitchy": glitchy, + "exponential_ease_out": exponential_ease_out, +} + +class CrossFadeImages: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "crossfadeimages" + CATEGORY = "KJNodes/image" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images_1": ("IMAGE",), + "images_2": ("IMAGE",), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), + "transition_start_index": ("INT", {"default": 1,"min": -4096, "max": 4096, "step": 1}), + "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + "start_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}), + "end_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + def crossfadeimages(self, images_1, images_2, transition_start_index, transitioning_frames, interpolation, start_level, end_level): + + crossfade_images = [] + + if transition_start_index < 0: + transition_start_index = len(images_1) + transition_start_index + if transition_start_index < 0: + raise ValueError("Transition start index is out of range for images_1.") + + transitioning_frames = min(transitioning_frames, len(images_1) - transition_start_index, len(images_2)) + + alphas = torch.linspace(start_level, end_level, transitioning_frames) + for i in range(transitioning_frames): + alpha = alphas[i] + image1 = images_1[transition_start_index + i] + image2 = images_2[i] + easing_function = easing_functions.get(interpolation) + alpha = easing_function(alpha) # Apply the easing function to the alpha value + + crossfade_image = 
crossfade(image1, image2, alpha) + crossfade_images.append(crossfade_image) + + # Convert crossfade_images to tensor + crossfade_images = torch.stack(crossfade_images, dim=0) + + # Append the beginning of images_1 (before the transition) + beginning_images_1 = images_1[:transition_start_index] + crossfade_images = torch.cat([beginning_images_1, crossfade_images], dim=0) + + # Append the remaining frames of images_2 (after the transition) + remaining_images_2 = images_2[transitioning_frames:] + if len(remaining_images_2) > 0: + crossfade_images = torch.cat([crossfade_images, remaining_images_2], dim=0) + + return (crossfade_images, ) + +class CrossFadeImagesMulti: + RETURN_TYPES = ("IMAGE",) + FUNCTION = "crossfadeimages" + CATEGORY = "KJNodes/image" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE",), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), + "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + }, + "optional": { + "image_2": ("IMAGE",), + } + } + + def crossfadeimages(self, inputcount, transitioning_frames, interpolation, **kwargs): + + image_1 = kwargs["image_1"] + first_image_shape = image_1.shape + first_image_device = image_1.device + height = image_1.shape[1] + width = image_1.shape[2] + + easing_function = easing_functions[interpolation] + + for c in range(1, inputcount): + frames = [] + new_image = kwargs.get(f"image_{c + 1}", torch.zeros(first_image_shape)).to(first_image_device) + new_image_height = new_image.shape[1] + new_image_width = new_image.shape[2] + + if new_image_height != height or new_image_width != width: + new_image = common_upscale(new_image.movedim(-1, 1), width, height, "lanczos", "disabled") + new_image = new_image.movedim(1, -1) # Move channels back to the last dimension + + last_frame_image_1 = image_1[-1] + first_frame_image_2 = new_image[0] + + for frame in range(transitioning_frames): + t = frame / (transitioning_frames - 1) + alpha = easing_function(t) + alpha_tensor = torch.tensor(alpha, dtype=last_frame_image_1.dtype, device=last_frame_image_1.device) + frame_image = crossfade(last_frame_image_1, first_frame_image_2, alpha_tensor) + frames.append(frame_image) + + frames = torch.stack(frames) + image_1 = torch.cat((image_1, frames, new_image), dim=0) + + return image_1, + +def transition_images(images_1, images_2, alpha, transition_type, blur_radius, reverse): + width = images_1.shape[1] + height = images_1.shape[0] + + mask = torch.zeros_like(images_1, device=images_1.device) + + alpha = alpha.item() + if reverse: + alpha = 1 - alpha + + #transitions from matteo's essential nodes + if "horizontal slide" in transition_type: + pos = round(width * alpha) + mask[:, :pos, :] = 1.0 + elif "vertical slide" in transition_type: + pos = round(height * alpha) + mask[:pos, :, :] = 1.0 + elif "box" in transition_type: + box_w = round(width * alpha) + box_h = round(height * alpha) + x1 = (width - box_w) // 2 + y1 = (height - box_h) // 2 + x2 = x1 + box_w + y2 = y1 + box_h + mask[y1:y2, x1:x2, :] = 1.0 + elif "circle" in transition_type: + radius = math.ceil(math.sqrt(pow(width, 2) + pow(height, 2)) * alpha / 2) + c_x = width // 2 + c_y = height // 2 + x = torch.arange(0, width, dtype=torch.float32, device="cpu") + y = torch.arange(0, height, dtype=torch.float32, device="cpu") + y, x = torch.meshgrid((y, x), indexing="ij") + circle = ((x - 
c_x) ** 2 + (y - c_y) ** 2) <= (radius ** 2) + mask[circle] = 1.0 + elif "horizontal door" in transition_type: + bar = math.ceil(height * alpha / 2) + if bar > 0: + mask[:bar, :, :] = 1.0 + mask[-bar:,:, :] = 1.0 + elif "vertical door" in transition_type: + bar = math.ceil(width * alpha / 2) + if bar > 0: + mask[:, :bar,:] = 1.0 + mask[:, -bar:,:] = 1.0 + elif "fade" in transition_type: + mask[:, :, :] = alpha + + mask = gaussian_blur(mask, blur_radius) + + return images_1 * (1 - mask) + images_2 * mask + +def gaussian_blur(mask, blur_radius): + if blur_radius > 0: + kernel_size = int(blur_radius * 2) + 1 + if kernel_size % 2 == 0: + kernel_size += 1 # Ensure kernel size is odd + sigma = blur_radius / 3 + x = torch.arange(-kernel_size // 2 + 1, kernel_size // 2 + 1, dtype=torch.float32) + x = torch.exp(-0.5 * (x / sigma) ** 2) + kernel1d = x / x.sum() + kernel2d = kernel1d[:, None] * kernel1d[None, :] + kernel2d = kernel2d.to(mask.device) + kernel2d = kernel2d.expand(mask.shape[2], 1, kernel2d.shape[0], kernel2d.shape[1]) + mask = mask.permute(2, 0, 1).unsqueeze(0) # Change to [C, H, W] and add batch dimension + mask = F.conv2d(mask, kernel2d, padding=kernel_size // 2, groups=mask.shape[1]) + mask = mask.squeeze(0).permute(1, 2, 0) # Change back to [H, W, C] + return mask + +easing_functions = { + "linear": lambda t: t, + "ease_in": ease_in, + "ease_out": ease_out, + "ease_in_out": ease_in_out, + "bounce": bounce, + "elastic": elastic, + "glitchy": glitchy, + "exponential_ease_out": exponential_ease_out, +} + +class TransitionImagesMulti: + RETURN_TYPES = ("IMAGE",) + FUNCTION = "transition" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates transitions between images. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE",), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), + "transition_type": (["horizontal slide", "vertical slide", "box", "circle", "horizontal door", "vertical door", "fade"],), + "transitioning_frames": ("INT", {"default": 2,"min": 2, "max": 4096, "step": 1}), + "blur_radius": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 100.0, "step": 0.1}), + "reverse": ("BOOLEAN", {"default": False}), + "device": (["CPU", "GPU"], {"default": "CPU"}), + }, + "optional": { + "image_2": ("IMAGE",), + } + } + + def transition(self, inputcount, transitioning_frames, transition_type, interpolation, device, blur_radius, reverse, **kwargs): + + gpu = model_management.get_torch_device() + + image_1 = kwargs["image_1"] + height = image_1.shape[1] + width = image_1.shape[2] + first_image_shape = image_1.shape + first_image_device = image_1.device + + easing_function = easing_functions[interpolation] + + for c in range(1, inputcount): + frames = [] + new_image = kwargs.get(f"image_{c + 1}", torch.zeros(first_image_shape)).to(first_image_device) + new_image_height = new_image.shape[1] + new_image_width = new_image.shape[2] + + if new_image_height != height or new_image_width != width: + new_image = common_upscale(new_image.movedim(-1, 1), width, height, "lanczos", "disabled") + new_image = new_image.movedim(1, -1) # Move channels back to the last dimension + + last_frame_image_1 = image_1[-1] + first_frame_image_2 = new_image[0] + if device == "GPU": + last_frame_image_1 = last_frame_image_1.to(gpu) + first_frame_image_2 = first_frame_image_2.to(gpu) + + if reverse: + last_frame_image_1, 
first_frame_image_2 = first_frame_image_2, last_frame_image_1 + + for frame in range(transitioning_frames): + t = frame / (transitioning_frames - 1) + alpha = easing_function(t) + alpha_tensor = torch.tensor(alpha, dtype=last_frame_image_1.dtype, device=last_frame_image_1.device) + frame_image = transition_images(last_frame_image_1, first_frame_image_2, alpha_tensor, transition_type, blur_radius, reverse) + frames.append(frame_image) + + frames = torch.stack(frames).cpu() + image_1 = torch.cat((image_1, frames, new_image), dim=0) + + return image_1.cpu(), + +class TransitionImagesInBatch: + RETURN_TYPES = ("IMAGE",) + FUNCTION = "transition" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates transitions between images in a batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), + "transition_type": (["horizontal slide", "vertical slide", "box", "circle", "horizontal door", "vertical door", "fade"],), + "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + "blur_radius": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 100.0, "step": 0.1}), + "reverse": ("BOOLEAN", {"default": False}), + "device": (["CPU", "GPU"], {"default": "CPU"}), + }, + } + + #transitions from matteo's essential nodes + def transition(self, images, transitioning_frames, transition_type, interpolation, device, blur_radius, reverse): + if images.shape[0] == 1: + return images, + + gpu = model_management.get_torch_device() + + easing_function = easing_functions[interpolation] + + images_list = [] + pbar = ProgressBar(images.shape[0] - 1) + for i in range(images.shape[0] - 1): + frames = [] + image_1 = images[i] + image_2 = images[i + 1] + + if device == "GPU": + image_1 = image_1.to(gpu) + image_2 = image_2.to(gpu) + + if reverse: + image_1, image_2 = image_2, image_1 + + for frame in range(transitioning_frames): + t = frame / (transitioning_frames - 1) + alpha = easing_function(t) + alpha_tensor = torch.tensor(alpha, dtype=image_1.dtype, device=image_1.device) + frame_image = transition_images(image_1, image_2, alpha_tensor, transition_type, blur_radius, reverse) + frames.append(frame_image) + pbar.update(1) + + frames = torch.stack(frames).cpu() + images_list.append(frames) + images = torch.cat(images_list, dim=0) + + return images.cpu(), + +class ImageBatchJoinWithTransition: + RETURN_TYPES = ("IMAGE",) + FUNCTION = "transition_batches" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Transitions between two batches of images, starting at a specified index in the first batch. +During the transition, frames from both batches are blended frame-by-frame, so the video keeps playing. 
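+For example, with two 30 frame batches, start_index 20 and transitioning_frames 10,
+frames 20-29 of the first batch are blended with frames 0-9 of the second,
+followed by the remaining frames of the second batch.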
+""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images_1": ("IMAGE",), + "images_2": ("IMAGE",), + "start_index": ("INT", {"default": 0, "min": -10000, "max": 10000, "step": 1}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), + "transition_type": (["horizontal slide", "vertical slide", "box", "circle", "horizontal door", "vertical door", "fade"],), + "transitioning_frames": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}), + "blur_radius": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}), + "reverse": ("BOOLEAN", {"default": False}), + "device": (["CPU", "GPU"], {"default": "CPU"}), + }, + } + + def transition_batches(self, images_1, images_2, start_index, interpolation, transition_type, transitioning_frames, blur_radius, reverse, device): + if images_1.shape[0] == 0 or images_2.shape[0] == 0: + raise ValueError("Both input batches must have at least one image.") + + if start_index < 0: + start_index = images_1.shape[0] + start_index + if start_index < 0 or start_index > images_1.shape[0]: + raise ValueError("start_index is out of range.") + + gpu = model_management.get_torch_device() + easing_function = easing_functions[interpolation] + out_frames = [] + + # Add images from images_1 up to start_index + if start_index > 0: + out_frames.append(images_1[:start_index]) + + # Determine how many frames we can blend + max_transition = min(transitioning_frames, images_1.shape[0] - start_index, images_2.shape[0]) + + # Blend corresponding frames from both batches + for i in range(max_transition): + img1 = images_1[start_index + i] + img2 = images_2[i] + if device == "GPU": + img1 = img1.to(gpu) + img2 = img2.to(gpu) + if reverse: + img1, img2 = img2, img1 + t = i / (max_transition - 1) if max_transition > 1 else 1.0 + alpha = easing_function(t) + alpha_tensor = torch.tensor(alpha, dtype=img1.dtype, device=img1.device) + frame_image = transition_images(img1, img2, alpha_tensor, transition_type, blur_radius, reverse) + out_frames.append(frame_image.cpu().unsqueeze(0)) + + # Add remaining images from images_2 after transition + if images_2.shape[0] > max_transition: + out_frames.append(images_2[max_transition:]) + + # Concatenate all frames + out = torch.cat(out_frames, dim=0) + return (out.cpu(),) + +class ShuffleImageBatch: + RETURN_TYPES = ("IMAGE",) + FUNCTION = "shuffle" + CATEGORY = "KJNodes/image" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + }, + } + + def shuffle(self, images, seed): + torch.manual_seed(seed) + B, H, W, C = images.shape + indices = torch.randperm(B) + shuffled_images = images[indices] + + return shuffled_images, + +class GetImageRangeFromBatch: + + RETURN_TYPES = ("IMAGE", "MASK", ) + FUNCTION = "imagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Returns a range of images from a batch. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "start_index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + }, + "optional": { + "images": ("IMAGE",), + "masks": ("MASK",), + } + } + + def imagesfrombatch(self, start_index, num_frames, images=None, masks=None): + chosen_images = None + chosen_masks = None + + # Process images if provided + if images is not None: + if start_index == -1: + start_index = max(0, len(images) - num_frames) + if start_index < 0 or start_index >= len(images): + raise ValueError("Start index is out of range") + end_index = min(start_index + num_frames, len(images)) + chosen_images = images[start_index:end_index] + + # Process masks if provided + if masks is not None: + if start_index == -1: + start_index = max(0, len(masks) - num_frames) + if start_index < 0 or start_index >= len(masks): + raise ValueError("Start index is out of range for masks") + end_index = min(start_index + num_frames, len(masks)) + chosen_masks = masks[start_index:end_index] + + return (chosen_images, chosen_masks,) + +class GetLatentRangeFromBatch: + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "latentsfrombatch" + CATEGORY = "KJNodes/latents" + DESCRIPTION = """ +Returns a range of latents from a batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "latents": ("LATENT",), + "start_index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": -1, "max": 4096, "step": 1}), + }, + } + + def latentsfrombatch(self, latents, start_index, num_frames): + chosen_latents = None + samples = latents["samples"] + if len(samples.shape) == 4: + B, C, H, W = samples.shape + num_latents = B + elif len(samples.shape) == 5: + B, C, T, H, W = samples.shape + num_latents = T + + if start_index == -1: + start_index = max(0, num_latents - num_frames) + if start_index < 0 or start_index >= num_latents: + raise ValueError("Start index is out of range") + + end_index = num_latents if num_frames == -1 else min(start_index + num_frames, num_latents) + + if len(samples.shape) == 4: + chosen_latents = samples[start_index:end_index] + elif len(samples.shape) == 5: + chosen_latents = samples[:, :, start_index:end_index] + + return ({"samples": chosen_latents.contiguous(),},) + +class InsertLatentToIndex: + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "insert" + CATEGORY = "KJNodes/latents" + DESCRIPTION = """ +Inserts a latent at the specified index into the original latent batch. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "source": ("LATENT",), + "destination": ("LATENT",), + "index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}), + }, + } + + def insert(self, source, destination, index): + samples_destination = destination["samples"] + samples_source = source["samples"].to(samples_destination) + + if len(samples_source.shape) == 4: + B, C, H, W = samples_source.shape + num_latents = B + elif len(samples_source.shape) == 5: + B, C, T, H, W = samples_source.shape + num_latents = T + + if index >= num_latents or index < 0: + raise ValueError(f"Index {index} out of bounds for tensor with {num_latents} latents") + + if len(samples_source.shape) == 4: + joined_latents = torch.cat([ + samples_destination[:index], + samples_source, + samples_destination[index+1:] + ], dim=0) + else: + joined_latents = torch.cat([ + samples_destination[:, :, :index], + samples_source, + samples_destination[:, :, index+1:] + ], dim=2) + + return ({"samples": joined_latents,},) + +class ImageBatchFilter: + + RETURN_TYPES = ("IMAGE", "STRING",) + RETURN_NAMES = ("images", "removed_indices",) + FUNCTION = "filter" + CATEGORY = "KJNodes/image" + DESCRIPTION = "Removes empty images from a batch" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "empty_color": ("STRING", {"default": "0, 0, 0"}), + "empty_threshold": ("FLOAT", {"default": 0.01,"min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "replacement_image": ("IMAGE",), + } + } + + def filter(self, images, empty_color, empty_threshold, replacement_image=None): + B, H, W, C = images.shape + + input_images = images.clone() + + empty_color_list = [int(color.strip()) for color in empty_color.split(',')] + empty_color_tensor = torch.tensor(empty_color_list, dtype=torch.float32).to(input_images.device) + + color_diff = torch.abs(input_images - empty_color_tensor) + mean_diff = color_diff.mean(dim=(1, 2, 3)) + + empty_indices = mean_diff <= empty_threshold + empty_indices_string = ', '.join([str(i) for i in range(B) if empty_indices[i]]) + + if replacement_image is not None: + B_rep, H_rep, W_rep, C_rep = replacement_image.shape + replacement = replacement_image.clone() + if (H_rep != images.shape[1]) or (W_rep != images.shape[2]) or (C_rep != images.shape[3]): + replacement = common_upscale(replacement.movedim(-1, 1), W, H, "lanczos", "center").movedim(1, -1) + input_images[empty_indices] = replacement[0] + + return (input_images, empty_indices_string,) + else: + non_empty_images = input_images[~empty_indices] + return (non_empty_images, empty_indices_string,) + +class GetImagesFromBatchIndexed: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "indexedimagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Selects and returns the images at the specified indices as an image batch. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), + }, + } + + def indexedimagesfrombatch(self, images, indexes): + + # Parse the indexes string into a list of integers + index_list = [int(index.strip()) for index in indexes.split(',')] + + # Convert list of indices to a PyTorch tensor + indices_tensor = torch.tensor(index_list, dtype=torch.long) + + # Select the images at the specified indices + chosen_images = images[indices_tensor] + + return (chosen_images,) + +class InsertImagesToBatchIndexed: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "insertimagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Inserts images at the specified indices into the original image batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "original_images": ("IMAGE",), + "images_to_insert": ("IMAGE",), + "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), + }, + "optional": { + "mode": (["replace", "insert"],), + } + } + + def insertimagesfrombatch(self, original_images, images_to_insert, indexes, mode="replace"): + if indexes == "": + return (original_images,) + + input_images = original_images.clone() + + # Parse the indexes string into a list of integers + index_list = [int(index.strip()) for index in indexes.split(',')] + + # Convert list of indices to a PyTorch tensor + indices_tensor = torch.tensor(index_list, dtype=torch.long) + + # Ensure the images_to_insert is a tensor + if not isinstance(images_to_insert, torch.Tensor): + images_to_insert = torch.tensor(images_to_insert) + + if mode == "replace": + # Replace the images at the specified indices + for index, image in zip(indices_tensor, images_to_insert): + input_images[index] = image + else: + # Create a list to hold the new image sequence + new_images = [] + insert_offset = 0 + + for i in range(len(input_images) + len(indices_tensor)): + if insert_offset < len(indices_tensor) and i == indices_tensor[insert_offset]: + # Use modulo to cycle through images_to_insert + new_images.append(images_to_insert[insert_offset % len(images_to_insert)]) + insert_offset += 1 + else: + new_images.append(input_images[i - insert_offset]) + + # Convert the list back to a tensor + input_images = torch.stack(new_images, dim=0) + + return (input_images,) + +class PadImageBatchInterleaved: + + RETURN_TYPES = ("IMAGE", "MASK",) + RETURN_NAMES = ("images", "masks",) + FUNCTION = "pad" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Inserts empty frames between the images in a batch. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "empty_frames_per_image": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + "pad_frame_value": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}), + "add_after_last": ("BOOLEAN", {"default": False}), + }, + } + + def pad(self, images, empty_frames_per_image, pad_frame_value, add_after_last): + B, H, W, C = images.shape + + # Handle single frame case specifically + if B == 1: + total_frames = 1 + empty_frames_per_image if add_after_last else 1 + else: + # Original B images + (B-1) sets of empty frames between them + total_frames = B + (B-1) * empty_frames_per_image + # Add additional empty frames after the last image if requested + if add_after_last: + total_frames += empty_frames_per_image + + # Create new tensor with zeros (empty frames) + padded_batch = torch.ones((total_frames, H, W, C), + dtype=images.dtype, + device=images.device) * pad_frame_value + # Create mask tensor (1 for original frames, 0 for empty frames) + mask = torch.zeros((total_frames, H, W), + dtype=images.dtype, + device=images.device) + + # Fill in original images at their new positions + for i in range(B): + if B == 1: + # For single frame, just place it at the beginning + new_pos = 0 + else: + # Each image is separated by empty_frames_per_image blank frames + new_pos = i * (empty_frames_per_image + 1) + + padded_batch[new_pos] = images[i] + mask[new_pos] = 1.0 # Mark this as an original frame + + return (padded_batch, mask) + +class ReplaceImagesInBatch: + + RETURN_TYPES = ("IMAGE", "MASK",) + FUNCTION = "replace" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Replaces the images in a batch, starting from the specified start index, +with the replacement images. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + }, + "optional": { + "original_images": ("IMAGE",), + "replacement_images": ("IMAGE",), + "original_masks": ("MASK",), + "replacement_masks": ("MASK",), + } + } + + def replace(self, original_images=None, replacement_images=None, start_index=1, original_masks=None, replacement_masks=None): + images = None + masks = None + + if original_images is not None and replacement_images is not None: + if start_index >= len(original_images): + raise ValueError("ReplaceImagesInBatch: Start index is out of range") + end_index = start_index + len(replacement_images) + if end_index > len(original_images): + raise ValueError("ReplaceImagesInBatch: End index is out of range") + + original_images_copy = original_images.clone() + if original_images_copy.shape[2] != replacement_images.shape[2] or original_images_copy.shape[3] != replacement_images.shape[3]: + replacement_images = common_upscale(replacement_images.movedim(-1, 1), original_images_copy.shape[1], original_images_copy.shape[2], "lanczos", "center").movedim(1, -1) + + original_images_copy[start_index:end_index] = replacement_images + images = original_images_copy + else: + images = torch.zeros((1, 64, 64, 3)) + + if original_masks is not None and replacement_masks is not None: + if start_index >= len(original_masks): + raise ValueError("ReplaceImagesInBatch: Start index is out of range") + end_index = start_index + len(replacement_masks) + if end_index > len(original_masks): + raise ValueError("ReplaceImagesInBatch: End index is out of range") + + original_masks_copy = original_masks.clone() + if original_masks_copy.shape[1] != replacement_masks.shape[1] or original_masks_copy.shape[2] != replacement_masks.shape[2]: + replacement_masks = common_upscale(replacement_masks.unsqueeze(1), original_masks_copy.shape[1], original_masks_copy.shape[2], "nearest-exact", "center").squeeze(0) + + original_masks_copy[start_index:end_index] = replacement_masks + masks = original_masks_copy + else: + masks = torch.zeros((1, 64, 64)) + + return (images, masks) + + +class ReverseImageBatch: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "reverseimagebatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Reverses the order of the images in a batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + }, + } + + def reverseimagebatch(self, images): + reversed_images = torch.flip(images, [0]) + return (reversed_images, ) + +class ImageBatchMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE", ), + + }, + "optional": { + "image_2": ("IMAGE", ), + } + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "combine" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates an image batch from multiple images. +You can set how many inputs the node has, +with the **inputcount** and clicking update. 
+""" + + def combine(self, inputcount, **kwargs): + from nodes import ImageBatch + image_batch_node = ImageBatch() + image = kwargs["image_1"].cpu() + first_image_shape = image.shape + for c in range(1, inputcount): + new_image = kwargs.get(f"image_{c + 1}", torch.zeros(first_image_shape)).cpu() + image, = image_batch_node.batch(image, new_image) + return (image,) + + +class ImageTensorList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "append" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates an image list from the input images. +""" + + def append(self, image1, image2): + image_list = [] + if isinstance(image1, torch.Tensor) and isinstance(image2, torch.Tensor): + image_list = [image1, image2] + elif isinstance(image1, list) and isinstance(image2, torch.Tensor): + image_list = image1 + [image2] + elif isinstance(image1, torch.Tensor) and isinstance(image2, list): + image_list = [image1] + image2 + elif isinstance(image1, list) and isinstance(image2, list): + image_list = image1 + image2 + return image_list, + +class ImageAddMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE", ), + "image_2": ("IMAGE", ), + "blending": ( + [ 'add', + 'subtract', + 'multiply', + 'difference', + ], + { + "default": 'add' + }), + "blend_amount": ("FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": 0.01}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "add" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Add blends multiple images together. +You can set how many inputs the node has, +with the **inputcount** and clicking update. +""" + + def add(self, inputcount, blending, blend_amount, **kwargs): + image = kwargs["image_1"] + for c in range(1, inputcount): + new_image = kwargs[f"image_{c + 1}"] + if blending == "add": + image = torch.add(image * blend_amount, new_image * blend_amount) + elif blending == "subtract": + image = torch.sub(image * blend_amount, new_image * blend_amount) + elif blending == "multiply": + image = torch.mul(image * blend_amount, new_image * blend_amount) + elif blending == "difference": + image = torch.sub(image, new_image) + return (image,) + +class ImageConcatMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE", ), + + "direction": ( + [ 'right', + 'down', + 'left', + 'up', + ], + { + "default": 'right' + }), + "match_image_size": ("BOOLEAN", {"default": False}), + }, + "optional": { + "image_2": ("IMAGE", ), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "combine" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates an image from multiple images. +You can set how many inputs the node has, +with the **inputcount** and clicking update. 
+""" + + def combine(self, inputcount, direction, match_image_size, **kwargs): + image = kwargs["image_1"] + first_image_shape = None + if first_image_shape is None: + first_image_shape = image.shape + for c in range(1, inputcount): + new_image = kwargs.get(f"image_{c + 1}", torch.zeros(first_image_shape)) + image, = ImageConcanate.concatenate(self, image, new_image, direction, match_image_size, first_image_shape=first_image_shape) + first_image_shape = None + return (image,) + +class PreviewAnimation: + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + self.compress_level = 1 + + methods = {"default": 4, "fastest": 0, "slowest": 6} + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "fps": ("FLOAT", {"default": 8.0, "min": 0.01, "max": 1000.0, "step": 0.01}), + }, + "optional": { + "images": ("IMAGE", ), + "masks": ("MASK", ), + }, + } + + RETURN_TYPES = () + FUNCTION = "preview" + OUTPUT_NODE = True + CATEGORY = "KJNodes/image" + + def preview(self, fps, images=None, masks=None): + filename_prefix = "AnimPreview" + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + results = list() + + pil_images = [] + + if images is not None and masks is not None: + for image in images: + i = 255. * image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + pil_images.append(img) + for mask in masks: + if pil_images: + mask_np = mask.cpu().numpy() + mask_np = np.clip(mask_np * 255, 0, 255).astype(np.uint8) # Convert to values between 0 and 255 + mask_img = Image.fromarray(mask_np, mode='L') + img = pil_images.pop(0) # Remove and get the first image + img = img.convert("RGBA") # Convert base image to RGBA + + # Create a new RGBA image based on the grayscale mask + rgba_mask_img = Image.new("RGBA", img.size, (255, 255, 255, 255)) + rgba_mask_img.putalpha(mask_img) # Use the mask image as the alpha channel + + # Composite the RGBA mask onto the base image + composited_img = Image.alpha_composite(img, rgba_mask_img) + pil_images.append(composited_img) # Add the composited image back + + elif images is not None and masks is None: + for image in images: + i = 255. * image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + pil_images.append(img) + + elif masks is not None and images is None: + for mask in masks: + mask_np = 255. 
* mask.cpu().numpy() + mask_img = Image.fromarray(np.clip(mask_np, 0, 255).astype(np.uint8)) + pil_images.append(mask_img) + else: + print("PreviewAnimation: No images or masks provided") + return { "ui": { "images": results, "animated": (None,), "text": "empty" }} + + num_frames = len(pil_images) + + c = len(pil_images) + for i in range(0, c, num_frames): + file = f"{filename}_{counter:05}_.webp" + pil_images[i].save(os.path.join(full_output_folder, file), save_all=True, duration=int(1000.0/fps), append_images=pil_images[i + 1:i + num_frames], lossless=False, quality=50, method=0) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + counter += 1 + + animated = num_frames != 1 + return { "ui": { "images": results, "animated": (animated,), "text": [f"{num_frames}x{pil_images[0].size[0]}x{pil_images[0].size[1]}"] } } + +class ImageResizeKJ: + upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"] + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "upscale_method": (s.upscale_methods,), + "keep_proportion": ("BOOLEAN", { "default": False }), + "divisible_by": ("INT", { "default": 2, "min": 0, "max": 512, "step": 1, }), + }, + "optional" : { + #"width_input": ("INT", { "forceInput": True}), + #"height_input": ("INT", { "forceInput": True}), + "get_image_size": ("IMAGE",), + "crop": (["disabled","center", 0], { "tooltip": "0 will do the default center crop, this is a workaround for the widget order changing with the new frontend, as in old workflows the value of this widget becomes 0 automatically" }), + } + } + + RETURN_TYPES = ("IMAGE", "INT", "INT",) + RETURN_NAMES = ("IMAGE", "width", "height",) + FUNCTION = "resize" + CATEGORY = "KJNodes/image" + DEPRECATED = True + DESCRIPTION = """ +DEPRECATED! + +Due to ComfyUI frontend changes, this node should no longer be used, please check the +v2 of the node. This node is only kept to not completely break older workflows. 
+ +""" + + def resize(self, image, width, height, keep_proportion, upscale_method, divisible_by, + width_input=None, height_input=None, get_image_size=None, crop="disabled"): + B, H, W, C = image.shape + + if width_input: + width = width_input + if height_input: + height = height_input + if get_image_size is not None: + _, height, width, _ = get_image_size.shape + + if keep_proportion and get_image_size is None: + # If one of the dimensions is zero, calculate it to maintain the aspect ratio + if width == 0 and height != 0: + ratio = height / H + width = round(W * ratio) + elif height == 0 and width != 0: + ratio = width / W + height = round(H * ratio) + elif width != 0 and height != 0: + # Scale based on which dimension is smaller in proportion to the desired dimensions + ratio = min(width / W, height / H) + width = round(W * ratio) + height = round(H * ratio) + else: + if width == 0: + width = W + if height == 0: + height = H + + if divisible_by > 1 and get_image_size is None: + width = width - (width % divisible_by) + height = height - (height % divisible_by) + + if crop == 0: #workaround for old workflows + crop = "center" + + image = image.movedim(-1,1) + image = common_upscale(image, width, height, upscale_method, crop) + image = image.movedim(1,-1) + + return(image, image.shape[2], image.shape[1],) + +class ImageResizeKJv2: + upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"] + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "upscale_method": (s.upscale_methods,), + "keep_proportion": (["stretch", "resize", "pad", "pad_edge", "crop"], { "default": False }), + "pad_color": ("STRING", { "default": "0, 0, 0", "tooltip": "Color to use for padding."}), + "crop_position": (["center", "top", "bottom", "left", "right"], { "default": "center" }), + "divisible_by": ("INT", { "default": 2, "min": 0, "max": 512, "step": 1, }), + }, + "optional" : { + "mask": ("MASK",), + "device": (["cpu", "gpu"],), + }, + "hidden": { + "unique_id": "UNIQUE_ID", + }, + } + + RETURN_TYPES = ("IMAGE", "INT", "INT", "MASK",) + RETURN_NAMES = ("IMAGE", "width", "height", "mask",) + FUNCTION = "resize" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Resizes the image to the specified width and height. +Size can be retrieved from the input. + +Keep proportions keeps the aspect ratio of the image, by +highest dimension. 
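+For example, a 1920x1080 image resized to 512x512 with keep_proportion "resize"
+gives 512x288, "pad" pads that result back out to 512x512 using pad_color,
+and "crop" first crops to the target aspect ratio at crop_position, then scales.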
+""" + + def resize(self, image, width, height, keep_proportion, upscale_method, divisible_by, pad_color, crop_position, unique_id, device="cpu", mask=None): + B, H, W, C = image.shape + + if device == "gpu": + if upscale_method == "lanczos": + raise Exception("Lanczos is not supported on the GPU") + device = model_management.get_torch_device() + else: + device = torch.device("cpu") + + if width == 0: + width = W + if height == 0: + height = H + + if keep_proportion == "resize" or keep_proportion.startswith("pad"): + # If one of the dimensions is zero, calculate it to maintain the aspect ratio + if width == 0 and height != 0: + ratio = height / H + new_width = round(W * ratio) + elif height == 0 and width != 0: + ratio = width / W + new_height = round(H * ratio) + elif width != 0 and height != 0: + # Scale based on which dimension is smaller in proportion to the desired dimensions + ratio = min(width / W, height / H) + new_width = round(W * ratio) + new_height = round(H * ratio) + + if keep_proportion.startswith("pad"): + # Calculate padding based on position + if crop_position == "center": + pad_left = (width - new_width) // 2 + pad_right = width - new_width - pad_left + pad_top = (height - new_height) // 2 + pad_bottom = height - new_height - pad_top + elif crop_position == "top": + pad_left = (width - new_width) // 2 + pad_right = width - new_width - pad_left + pad_top = 0 + pad_bottom = height - new_height + elif crop_position == "bottom": + pad_left = (width - new_width) // 2 + pad_right = width - new_width - pad_left + pad_top = height - new_height + pad_bottom = 0 + elif crop_position == "left": + pad_left = 0 + pad_right = width - new_width + pad_top = (height - new_height) // 2 + pad_bottom = height - new_height - pad_top + elif crop_position == "right": + pad_left = width - new_width + pad_right = 0 + pad_top = (height - new_height) // 2 + pad_bottom = height - new_height - pad_top + + width = new_width + height = new_height + + if divisible_by > 1: + width = width - (width % divisible_by) + height = height - (height % divisible_by) + + out_image = image.clone().to(device) + + if mask is not None: + out_mask = mask.clone().to(device) + else: + out_mask = None + + if keep_proportion == "crop": + old_width = W + old_height = H + old_aspect = old_width / old_height + new_aspect = width / height + + # Calculate dimensions to keep + if old_aspect > new_aspect: # Image is wider than target + crop_w = round(old_height * new_aspect) + crop_h = old_height + else: # Image is taller than target + crop_w = old_width + crop_h = round(old_width / new_aspect) + + # Calculate crop position + if crop_position == "center": + x = (old_width - crop_w) // 2 + y = (old_height - crop_h) // 2 + elif crop_position == "top": + x = (old_width - crop_w) // 2 + y = 0 + elif crop_position == "bottom": + x = (old_width - crop_w) // 2 + y = old_height - crop_h + elif crop_position == "left": + x = 0 + y = (old_height - crop_h) // 2 + elif crop_position == "right": + x = old_width - crop_w + y = (old_height - crop_h) // 2 + + # Apply crop + out_image = out_image.narrow(-2, x, crop_w).narrow(-3, y, crop_h) + if mask is not None: + out_mask = out_mask.narrow(-1, x, crop_w).narrow(-2, y, crop_h) + + out_image = common_upscale(out_image.movedim(-1,1), width, height, upscale_method, crop="disabled").movedim(1,-1) + + if mask is not None: + if upscale_method == "lanczos": + out_mask = common_upscale(out_mask.unsqueeze(1).repeat(1, 3, 1, 1), width, height, upscale_method, crop="disabled").movedim(1,-1)[:, :, :, 0] + 
else: + out_mask = common_upscale(out_mask.unsqueeze(1), width, height, upscale_method, crop="disabled").squeeze(1) + + if keep_proportion.startswith("pad"): + if pad_left > 0 or pad_right > 0 or pad_top > 0 or pad_bottom > 0: + padded_width = width + pad_left + pad_right + padded_height = height + pad_top + pad_bottom + if divisible_by > 1: + width_remainder = padded_width % divisible_by + height_remainder = padded_height % divisible_by + if width_remainder > 0: + extra_width = divisible_by - width_remainder + pad_right += extra_width + if height_remainder > 0: + extra_height = divisible_by - height_remainder + pad_bottom += extra_height + out_image, _ = ImagePadKJ.pad(self, out_image, pad_left, pad_right, pad_top, pad_bottom, 0, pad_color, "edge" if keep_proportion == "pad_edge" else "color") + if mask is not None: + out_mask = out_mask.unsqueeze(1).repeat(1, 3, 1, 1).movedim(1,-1) + out_mask, _ = ImagePadKJ.pad(self, out_mask, pad_left, pad_right, pad_top, pad_bottom, 0, pad_color, "edge" if keep_proportion == "pad_edge" else "color") + out_mask = out_mask[:, :, :, 0] + else: + B, H_pad, W_pad, _ = out_image.shape + out_mask = torch.ones((B, H_pad, W_pad), dtype=out_image.dtype, device=out_image.device) + out_mask[:, pad_top:pad_top+height, pad_left:pad_left+width] = 0.0 + + + if unique_id and PromptServer is not None: + try: + num_elements = out_image.numel() + element_size = out_image.element_size() + memory_size_mb = (num_elements * element_size) / (1024 * 1024) + + PromptServer.instance.send_progress_text( + f"Output: {out_image.shape[0]} x {out_image.shape[2]} x {out_image.shape[1]} | {memory_size_mb:.2f}MB", + unique_id + ) + except: + pass + + return(out_image.cpu(), out_image.shape[2], out_image.shape[1], out_mask.cpu() if out_mask is not None else torch.zeros(64,64, device=torch.device("cpu"), dtype=torch.float32)) + +import pathlib +class LoadAndResizeImage: + _color_channels = ["alpha", "red", "green", "blue"] + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [f.name for f in pathlib.Path(input_dir).iterdir() if f.is_file()] + return {"required": + { + "image": (sorted(files), {"image_upload": True}), + "resize": ("BOOLEAN", { "default": False }), + "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "repeat": ("INT", { "default": 1, "min": 1, "max": 4096, "step": 1, }), + "keep_proportion": ("BOOLEAN", { "default": False }), + "divisible_by": ("INT", { "default": 2, "min": 0, "max": 512, "step": 1, }), + "mask_channel": (s._color_channels, {"tooltip": "Channel to use for the mask output"}), + "background_color": ("STRING", { "default": "", "tooltip": "Fills the alpha channel with the specified color."}), + }, + } + + CATEGORY = "KJNodes/image" + RETURN_TYPES = ("IMAGE", "MASK", "INT", "INT", "STRING",) + RETURN_NAMES = ("image", "mask", "width", "height","image_path",) + FUNCTION = "load_image" + + def load_image(self, image, resize, width, height, repeat, keep_proportion, divisible_by, mask_channel, background_color): + from PIL import ImageColor, Image, ImageOps, ImageSequence + import numpy as np + import torch + image_path = folder_paths.get_annotated_filepath(image) + + import node_helpers + img = node_helpers.pillow(Image.open, image_path) + + # Process the background_color + if background_color: + try: + # Try to parse as RGB tuple + bg_color_rgba = tuple(int(x.strip()) for x in 
background_color.split(',')) + except ValueError: + # If parsing fails, it might be a hex color or named color + if background_color.startswith('#') or background_color.lower() in ImageColor.colormap: + bg_color_rgba = ImageColor.getrgb(background_color) + else: + raise ValueError(f"Invalid background color: {background_color}") + + bg_color_rgba += (255,) # Add alpha channel + else: + bg_color_rgba = None # No background color specified + + output_images = [] + output_masks = [] + w, h = None, None + + excluded_formats = ['MPO'] + + W, H = img.size + if resize: + if keep_proportion: + ratio = min(width / W, height / H) + width = round(W * ratio) + height = round(H * ratio) + else: + if width == 0: + width = W + if height == 0: + height = H + + if divisible_by > 1: + width = width - (width % divisible_by) + height = height - (height % divisible_by) + else: + width, height = W, H + + for frame in ImageSequence.Iterator(img): + frame = node_helpers.pillow(ImageOps.exif_transpose, frame) + + if frame.mode == 'I': + frame = frame.point(lambda i: i * (1 / 255)) + + if frame.mode == 'P': + frame = frame.convert("RGBA") + elif 'A' in frame.getbands(): + frame = frame.convert("RGBA") + + # Extract alpha channel if it exists + if 'A' in frame.getbands() and bg_color_rgba: + alpha_mask = np.array(frame.getchannel('A')).astype(np.float32) / 255.0 + alpha_mask = 1. - torch.from_numpy(alpha_mask) + bg_image = Image.new("RGBA", frame.size, bg_color_rgba) + # Composite the frame onto the background + frame = Image.alpha_composite(bg_image, frame) + else: + alpha_mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + + image = frame.convert("RGB") + + if len(output_images) == 0: + w = image.size[0] + h = image.size[1] + + if image.size[0] != w or image.size[1] != h: + continue + if resize: + image = image.resize((width, height), Image.Resampling.BILINEAR) + + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + + c = mask_channel[0].upper() + if c in frame.getbands(): + if resize: + frame = frame.resize((width, height), Image.Resampling.BILINEAR) + mask = np.array(frame.getchannel(c)).astype(np.float32) / 255.0 + mask = torch.from_numpy(mask) + if c == 'A' and bg_color_rgba: + mask = alpha_mask + elif c == 'A': + mask = 1. 
- mask + else: + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + + output_images.append(image) + output_masks.append(mask.unsqueeze(0)) + + if len(output_images) > 1 and img.format not in excluded_formats: + output_image = torch.cat(output_images, dim=0) + output_mask = torch.cat(output_masks, dim=0) + else: + output_image = output_images[0] + output_mask = output_masks[0] + if repeat > 1: + output_image = output_image.repeat(repeat, 1, 1, 1) + output_mask = output_mask.repeat(repeat, 1, 1) + + return (output_image, output_mask, width, height, image_path) + + + # @classmethod + # def IS_CHANGED(s, image, **kwargs): + # image_path = folder_paths.get_annotated_filepath(image) + # m = hashlib.sha256() + # with open(image_path, 'rb') as f: + # m.update(f.read()) + # return m.digest().hex() + + @classmethod + def VALIDATE_INPUTS(s, image): + if not folder_paths.exists_annotated_filepath(image): + return "Invalid image file: {}".format(image) + + return True + +import hashlib +class LoadImagesFromFolderKJ: + # Dictionary to store folder hashes + folder_hashes = {} + + @classmethod + def IS_CHANGED(cls, folder, **kwargs): + if not os.path.isdir(folder): + return float("NaN") + + valid_extensions = ['.jpg', '.jpeg', '.png', '.webp', '.tga'] + include_subfolders = kwargs.get('include_subfolders', False) + + file_data = [] + if include_subfolders: + for root, _, files in os.walk(folder): + for file in files: + if any(file.lower().endswith(ext) for ext in valid_extensions): + path = os.path.join(root, file) + try: + mtime = os.path.getmtime(path) + file_data.append((path, mtime)) + except OSError: + pass + else: + for file in os.listdir(folder): + if any(file.lower().endswith(ext) for ext in valid_extensions): + path = os.path.join(folder, file) + try: + mtime = os.path.getmtime(path) + file_data.append((path, mtime)) + except OSError: + pass + + file_data.sort() + + combined_hash = hashlib.md5() + combined_hash.update(folder.encode('utf-8')) + combined_hash.update(str(len(file_data)).encode('utf-8')) + + for path, mtime in file_data: + combined_hash.update(f"{path}:{mtime}".encode('utf-8')) + + current_hash = combined_hash.hexdigest() + + old_hash = cls.folder_hashes.get(folder) + cls.folder_hashes[folder] = current_hash + + if old_hash == current_hash: + return old_hash + + return current_hash + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "folder": ("STRING", {"default": ""}), + "width": ("INT", {"default": 1024, "min": -1, "step": 1}), + "height": ("INT", {"default": 1024, "min": -1, "step": 1}), + "keep_aspect_ratio": (["crop", "pad", "stretch",],), + }, + "optional": { + "image_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "start_index": ("INT", {"default": 0, "min": 0, "step": 1}), + "include_subfolders": ("BOOLEAN", {"default": False}), + } + } + + RETURN_TYPES = ("IMAGE", "MASK", "INT", "STRING",) + RETURN_NAMES = ("image", "mask", "count", "image_path",) + FUNCTION = "load_images" + CATEGORY = "KJNodes/image" + DESCRIPTION = """Loads images from a folder into a batch, images are resized and loaded into a batch.""" + + def load_images(self, folder, width, height, image_load_cap, start_index, keep_aspect_ratio, include_subfolders=False): + if not os.path.isdir(folder): + raise FileNotFoundError(f"Folder '{folder} cannot be found.'") + + valid_extensions = ['.jpg', '.jpeg', '.png', '.webp', '.tga'] + image_paths = [] + if include_subfolders: + for root, _, files in os.walk(folder): + for file in files: + if any(file.lower().endswith(ext) for 
ext in valid_extensions): + image_paths.append(os.path.join(root, file)) + else: + for file in os.listdir(folder): + if any(file.lower().endswith(ext) for ext in valid_extensions): + image_paths.append(os.path.join(folder, file)) + + dir_files = sorted(image_paths) + + if len(dir_files) == 0: + raise FileNotFoundError(f"No files in directory '{folder}'.") + + # start at start_index + dir_files = dir_files[start_index:] + + images = [] + masks = [] + image_path_list = [] + + limit_images = False + if image_load_cap > 0: + limit_images = True + image_count = 0 + + for image_path in dir_files: + if os.path.isdir(image_path): + continue + if limit_images and image_count >= image_load_cap: + break + i = Image.open(image_path) + i = ImageOps.exif_transpose(i) + + # Resize image to maximum dimensions + if width == -1 and height == -1: + width = i.size[0] + height = i.size[1] + if i.size != (width, height): + i = self.resize_with_aspect_ratio(i, width, height, keep_aspect_ratio) + + + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. - torch.from_numpy(mask) + if mask.shape != (height, width): + mask = torch.nn.functional.interpolate(mask.unsqueeze(0).unsqueeze(0), + size=(height, width), + mode='bilinear', + align_corners=False).squeeze() + else: + mask = torch.zeros((height, width), dtype=torch.float32, device="cpu") + + images.append(image) + masks.append(mask) + image_path_list.append(image_path) + image_count += 1 + + if len(images) == 1: + return (images[0], masks[0], 1, image_path_list) + + elif len(images) > 1: + image1 = images[0] + mask1 = masks[0].unsqueeze(0) + + for image2 in images[1:]: + image1 = torch.cat((image1, image2), dim=0) + + for mask2 in masks[1:]: + mask1 = torch.cat((mask1, mask2.unsqueeze(0)), dim=0) + + return (image1, mask1, len(images), image_path_list) + def resize_with_aspect_ratio(self, img, width, height, mode): + if mode == "stretch": + return img.resize((width, height), Image.Resampling.LANCZOS) + + img_width, img_height = img.size + aspect_ratio = img_width / img_height + target_ratio = width / height + + if mode == "crop": + # Calculate dimensions for center crop + if aspect_ratio > target_ratio: + # Image is wider - crop width + new_width = int(height * aspect_ratio) + img = img.resize((new_width, height), Image.Resampling.LANCZOS) + left = (new_width - width) // 2 + return img.crop((left, 0, left + width, height)) + else: + # Image is taller - crop height + new_height = int(width / aspect_ratio) + img = img.resize((width, new_height), Image.Resampling.LANCZOS) + top = (new_height - height) // 2 + return img.crop((0, top, width, top + height)) + + elif mode == "pad": + pad_color = self.get_edge_color(img) + # Calculate dimensions for padding + if aspect_ratio > target_ratio: + # Image is wider - pad height + new_height = int(width / aspect_ratio) + img = img.resize((width, new_height), Image.Resampling.LANCZOS) + padding = (height - new_height) // 2 + padded = Image.new('RGBA', (width, height), pad_color) + padded.paste(img, (0, padding)) + return padded + else: + # Image is taller - pad width + new_width = int(height * aspect_ratio) + img = img.resize((new_width, height), Image.Resampling.LANCZOS) + padding = (width - new_width) // 2 + padded = Image.new('RGBA', (width, height), pad_color) + padded.paste(img, (padding, 0)) + return padded + def get_edge_color(self, img): + from PIL 
import ImageStat + """Sample edges and return dominant color""" + width, height = img.size + img = img.convert('RGBA') + + # Create 1-pixel high/wide images from edges + top = img.crop((0, 0, width, 1)) + bottom = img.crop((0, height-1, width, height)) + left = img.crop((0, 0, 1, height)) + right = img.crop((width-1, 0, width, height)) + + # Combine edges into single image + edges = Image.new('RGBA', (width*2 + height*2, 1)) + edges.paste(top, (0, 0)) + edges.paste(bottom, (width, 0)) + edges.paste(left.resize((height, 1)), (width*2, 0)) + edges.paste(right.resize((height, 1)), (width*2 + height, 0)) + + # Get median color + stat = ImageStat.Stat(edges) + median = tuple(map(int, stat.median)) + return median + + +class ImageGridtoBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "columns": ("INT", {"default": 3, "min": 1, "max": 8, "tooltip": "The number of columns in the grid."}), + "rows": ("INT", {"default": 0, "min": 0, "max": 8, "tooltip": "The number of rows in the grid. Set to 0 for automatic calculation."}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "decompose" + CATEGORY = "KJNodes/image" + DESCRIPTION = "Converts a grid of images to a batch of images." + + def decompose(self, image, columns, rows): + B, H, W, C = image.shape + print("input size: ", image.shape) + + # Calculate cell width, rounding down + cell_width = W // columns + + if rows == 0: + # If rows is 0, assume square cells and calculate the number of full rows + rows = max(1, H // cell_width) + + # Calculate cell height from the given or derived row count + cell_height = H // rows + + # Crop the image to fit full cells + image = image[:, :rows*cell_height, :columns*cell_width, :] + + # Reshape and permute the image to get the grid + image = image.view(B, rows, cell_height, columns, cell_width, C) + image = image.permute(0, 1, 3, 2, 4, 5).contiguous() + image = image.view(B, rows * columns, cell_height, cell_width, C) + + # Reshape to the final batch tensor + img_tensor = image.view(-1, cell_height, cell_width, C) + + return (img_tensor,) + +class SaveImageKJ: + def __init__(self): + self.type = "output" + self.prefix_append = "" + self.compress_level = 4 + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE", {"tooltip": "The images to save."}), + "filename_prefix": ("STRING", {"default": "ComfyUI", "tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}), + "output_folder": ("STRING", {"default": "output", "tooltip": "The folder to save the images to."}), + }, + "optional": { + "caption_file_extension": ("STRING", {"default": ".txt", "tooltip": "The extension for the caption file."}), + "caption": ("STRING", {"forceInput": True, "tooltip": "string to save as .txt file"}), + }, + "hidden": { + "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO" + }, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("filename",) + FUNCTION = "save_images" + + OUTPUT_NODE = True + + CATEGORY = "KJNodes/image" + DESCRIPTION = "Saves the input images to your ComfyUI output directory."
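+ # Usage note (illustrative, based on the save logic below): an absolute output_folder is created if missing and used as-is; otherwise images go to the default ComfyUI output directory.
+ # "%batch_num%" in the resolved filename is replaced per image, so with an empty output folder a prefix such as "render_%batch_num%" is expected to produce render_0_00001_.png, render_1_00002_.png, ...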
+ + def save_images(self, images, output_folder, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None, caption=None, caption_file_extension=".txt"): + filename_prefix += self.prefix_append + + if os.path.isabs(output_folder): + if not os.path.exists(output_folder): + os.makedirs(output_folder, exist_ok=True) + full_output_folder = output_folder + _, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, output_folder, images[0].shape[1], images[0].shape[0]) + else: + self.output_dir = folder_paths.get_output_directory() + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) + + results = list() + for (batch_number, image) in enumerate(images): + i = 255. * image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + metadata = None + if not args.disable_metadata: + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + filename_with_batch_num = filename.replace("%batch_num%", str(batch_number)) + base_file_name = f"{filename_with_batch_num}_{counter:05}_" + file = f"{base_file_name}.png" + img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + if caption is not None: + txt_file = base_file_name + caption_file_extension + file_path = os.path.join(full_output_folder, txt_file) + with open(file_path, 'w') as f: + f.write(caption) + + counter += 1 + + return file, + +class SaveStringKJ: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + self.compress_level = 4 + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "string": ("STRING", {"forceInput": True, "tooltip": "string to save as .txt file"}), + "filename_prefix": ("STRING", {"default": "text", "tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}), + "output_folder": ("STRING", {"default": "output", "tooltip": "The folder to save the images to."}), + }, + "optional": { + "file_extension": ("STRING", {"default": ".txt", "tooltip": "The extension for the caption file."}), + }, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("filename",) + FUNCTION = "save_string" + + OUTPUT_NODE = True + + CATEGORY = "KJNodes/misc" + DESCRIPTION = "Saves the input string to your ComfyUI output directory." 
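+ # Usage note (illustrative, based on the defaults below): the string is written as <prefix>_<5-digit counter>_<extension>, e.g. the default settings are expected to produce text_00001_.txt in the ComfyUI output directory (or in output_folder when it differs from "output").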
+ + def save_string(self, string, output_folder, filename_prefix="text", file_extension=".txt"): + filename_prefix += self.prefix_append + + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + if output_folder != "output": + if not os.path.exists(output_folder): + os.makedirs(output_folder, exist_ok=True) + full_output_folder = output_folder + + base_file_name = f"{filename_prefix}_{counter:05}_" + + txt_file = base_file_name + file_extension + file_path = os.path.join(full_output_folder, txt_file) + with open(file_path, 'w') as f: + f.write(string) + + return (txt_file,) + +to_pil_image = T.ToPILImage() + +class FastPreview: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE", ), + "format": (["JPEG", "PNG", "WEBP"], {"default": "JPEG"}), + "quality" : ("INT", {"default": 75, "min": 1, "max": 100, "step": 1}), + }, + } + + RETURN_TYPES = () + FUNCTION = "preview" + CATEGORY = "KJNodes/experimental" + OUTPUT_NODE = True + DESCRIPTION = "Experimental node for faster image previews: the image is sent to the UI as base64 data instead of being saved to disk." + + def preview(self, image, format, quality): + pil_image = to_pil_image(image[0].permute(2, 0, 1)) + + with io.BytesIO() as buffered: + pil_image.save(buffered, format=format, quality=quality) + img_bytes = buffered.getvalue() + + img_base64 = base64.b64encode(img_bytes).decode('utf-8') + + return { + "ui": {"bg_image": [img_base64]}, + "result": () + } + +class ImageCropByMaskAndResize: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "mask": ("MASK", ), + "base_resolution": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "padding": ("INT", { "default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "min_crop_resolution": ("INT", { "default": 128, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "max_crop_resolution": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + + }, + } + + RETURN_TYPES = ("IMAGE", "MASK", "BBOX", ) + RETURN_NAMES = ("images", "masks", "bbox",) + FUNCTION = "crop" + CATEGORY = "KJNodes/image" + + def crop_by_mask(self, mask, padding=0, min_crop_resolution=None, max_crop_resolution=None): + iy, ix = (mask == 1).nonzero(as_tuple=True) + h0, w0 = mask.shape + + if iy.numel() == 0: + x_c = w0 / 2.0 + y_c = h0 / 2.0 + width = 0 + height = 0 + else: + x_min = ix.min().item() + x_max = ix.max().item() + y_min = iy.min().item() + y_max = iy.max().item() + + width = x_max - x_min + height = y_max - y_min + + if width > w0 or height > h0: + raise Exception("Masked area out of bounds") + + x_c = (x_min + x_max) / 2.0 + y_c = (y_min + y_max) / 2.0 + + if min_crop_resolution: + width = max(width, min_crop_resolution) + height = max(height, min_crop_resolution) + + if max_crop_resolution: + width = min(width, max_crop_resolution) + height = min(height, max_crop_resolution) + + if w0 <= width: + x0 = 0 + w = w0 + else: + x0 = max(0, x_c - width / 2 - padding) + w = width + 2 * padding + if x0 + w > w0: + x0 = w0 - w + + if h0 <= height: + y0 = 0 + h = h0 + else: + y0 = max(0, y_c - height / 2 - padding) + h = height + 2 * padding + if y0 + h > h0: + y0 = h0 - h + + return (int(x0), int(y0), int(w), int(h)) + + def crop(self, image, mask, base_resolution, padding=0, min_crop_resolution=128, max_crop_resolution=512): + mask = mask.round() + image_list = [] + mask_list = [] + bbox_list = [] + + # First, collect all
bounding boxes + bbox_params = [] + aspect_ratios = [] + for i in range(image.shape[0]): + x0, y0, w, h = self.crop_by_mask(mask[i], padding, min_crop_resolution, max_crop_resolution) + bbox_params.append((x0, y0, w, h)) + aspect_ratios.append(w / h) + + # Find maximum width and height + max_w = max([w for x0, y0, w, h in bbox_params]) + max_h = max([h for x0, y0, w, h in bbox_params]) + max_aspect_ratio = max(aspect_ratios) + + # Ensure dimensions are divisible by 16 + max_w = (max_w + 15) // 16 * 16 + max_h = (max_h + 15) // 16 * 16 + # Calculate common target dimensions + if max_aspect_ratio > 1: + target_width = base_resolution + target_height = int(base_resolution / max_aspect_ratio) + else: + target_height = base_resolution + target_width = int(base_resolution * max_aspect_ratio) + + for i in range(image.shape[0]): + x0, y0, w, h = bbox_params[i] + + # Adjust cropping to use maximum width and height + x_center = x0 + w / 2 + y_center = y0 + h / 2 + + x0_new = int(max(0, x_center - max_w / 2)) + y0_new = int(max(0, y_center - max_h / 2)) + x1_new = int(min(x0_new + max_w, image.shape[2])) + y1_new = int(min(y0_new + max_h, image.shape[1])) + x0_new = x1_new - max_w + y0_new = y1_new - max_h + + cropped_image = image[i][y0_new:y1_new, x0_new:x1_new, :] + cropped_mask = mask[i][y0_new:y1_new, x0_new:x1_new] + + # Ensure dimensions are divisible by 16 + target_width = (target_width + 15) // 16 * 16 + target_height = (target_height + 15) // 16 * 16 + + cropped_image = cropped_image.unsqueeze(0).movedim(-1, 1) # Move C to the second position (B, C, H, W) + cropped_image = common_upscale(cropped_image, target_width, target_height, "lanczos", "disabled") + cropped_image = cropped_image.movedim(1, -1).squeeze(0) + + cropped_mask = cropped_mask.unsqueeze(0).unsqueeze(0) + cropped_mask = common_upscale(cropped_mask, target_width, target_height, 'bilinear', "disabled") + cropped_mask = cropped_mask.squeeze(0).squeeze(0) + + image_list.append(cropped_image) + mask_list.append(cropped_mask) + bbox_list.append((x0_new, y0_new, x1_new, y1_new)) + + + return (torch.stack(image_list), torch.stack(mask_list), bbox_list) + +class ImageCropByMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "mask": ("MASK", ), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("image", ) + FUNCTION = "crop" + CATEGORY = "KJNodes/image" + DESCRIPTION = "Crops the input images based on the provided mask." 
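+ # Sketch of the bounding-box logic used below (descriptive only): for each batch item the crop is the tight bounding box of the nonzero mask pixels,
+ #   rows = torch.any(mask > 0, dim=1); cols = torch.any(mask > 0, dim=0)
+ #   y_min, y_max = torch.where(rows)[0][[0, -1]]; x_min, x_max = torch.where(cols)[0][[0, -1]]
+ # An all-zero mask leaves torch.where(...) empty, so such masks would raise an IndexError here.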
+ + def crop(self, image, mask): + B, H, W, C = image.shape + mask = mask.round() + + # Find bounding box for each batch + crops = [] + + for b in range(B): + # Get coordinates of non-zero elements + rows = torch.any(mask[min(b, mask.shape[0]-1)] > 0, dim=1) + cols = torch.any(mask[min(b, mask.shape[0]-1)] > 0, dim=0) + + # Find boundaries + y_min, y_max = torch.where(rows)[0][[0, -1]] + x_min, x_max = torch.where(cols)[0][[0, -1]] + + # Crop image and mask + crop = image[b:b+1, y_min:y_max+1, x_min:x_max+1, :] + crops.append(crop) + + # Stack results back together + cropped_images = torch.cat(crops, dim=0) + + return (cropped_images, ) + + + +class ImageUncropByMask: + + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "destination": ("IMAGE",), + "source": ("IMAGE",), + "mask": ("MASK",), + "bbox": ("BBOX",), + }, + } + + CATEGORY = "KJNodes/image" + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "uncrop" + + def uncrop(self, destination, source, mask, bbox=None): + + output_list = [] + + B, H, W, C = destination.shape + + for i in range(source.shape[0]): + x0, y0, x1, y1 = bbox[i] + bbox_height = y1 - y0 + bbox_width = x1 - x0 + + # Resize source image to match the bounding box dimensions + #resized_source = F.interpolate(source[i].unsqueeze(0).movedim(-1, 1), size=(bbox_height, bbox_width), mode='bilinear', align_corners=False) + resized_source = common_upscale(source[i].unsqueeze(0).movedim(-1, 1), bbox_width, bbox_height, "lanczos", "disabled") + resized_source = resized_source.movedim(1, -1).squeeze(0) + + # Resize mask to match the bounding box dimensions + resized_mask = common_upscale(mask[i].unsqueeze(0).unsqueeze(0), bbox_width, bbox_height, "bilinear", "disabled") + resized_mask = resized_mask.squeeze(0).squeeze(0) + + # Calculate padding values + pad_left = x0 + pad_right = W - x1 + pad_top = y0 + pad_bottom = H - y1 + + # Pad the resized source image and mask to fit the destination dimensions + padded_source = F.pad(resized_source, pad=(0, 0, pad_left, pad_right, pad_top, pad_bottom), mode='constant', value=0) + padded_mask = F.pad(resized_mask, pad=(pad_left, pad_right, pad_top, pad_bottom), mode='constant', value=0) + + # Ensure the padded mask has the correct shape + padded_mask = padded_mask.unsqueeze(2).expand(-1, -1, destination[i].shape[2]) + # Ensure the padded source has the correct shape + padded_source = padded_source.unsqueeze(2).expand(-1, -1, -1, destination[i].shape[2]).squeeze(2) + + # Combine the destination and padded source images using the mask + result = destination[i] * (1.0 - padded_mask) + padded_source * padded_mask + + output_list.append(result) + + + return (torch.stack(output_list),) + +class ImageCropByMaskBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "masks": ("MASK", ), + "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "padding": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 1, }), + "preserve_size": ("BOOLEAN", {"default": False}), + "bg_color": ("STRING", {"default": "0, 0, 0", "tooltip": "Color as RGB values in range 0-255, separated by commas."}), + } + } + + RETURN_TYPES = ("IMAGE", "MASK", ) + RETURN_NAMES = ("images", "masks",) + FUNCTION = "crop" + CATEGORY = "KJNodes/image" + DESCRIPTION = "Crops the input images based on the provided masks." 
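+ # Usage note (illustrative): bg_color is parsed as comma-separated 0-255 values scaled to 0-1, e.g. "255, 0, 0" -> [1.0, 0.0, 0.0].
+ # Each crop is resized to fit width x height (kept as-is when preserve_size is enabled and it already fits), centered on that canvas, and the area outside the mask is filled with bg_color; masks with no nonzero pixels are skipped.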
+ + def crop(self, image, masks, width, height, bg_color, padding, preserve_size): + B, H, W, C = image.shape + BM, HM, WM = masks.shape + mask_count = BM + if HM != H or WM != W: + masks = F.interpolate(masks.unsqueeze(1), size=(H, W), mode='nearest-exact').squeeze(1) + print(masks.shape) + output_images = [] + output_masks = [] + + bg_color = [int(x.strip())/255.0 for x in bg_color.split(",")] + + # For each mask + for i in range(mask_count): + curr_mask = masks[i] + + # Find bounds + y_indices, x_indices = torch.nonzero(curr_mask, as_tuple=True) + if len(y_indices) == 0 or len(x_indices) == 0: + continue + + # Get exact bounds with padding + min_y = max(0, y_indices.min().item() - padding) + max_y = min(H, y_indices.max().item() + 1 + padding) + min_x = max(0, x_indices.min().item() - padding) + max_x = min(W, x_indices.max().item() + 1 + padding) + + # Ensure mask has correct shape for multiplication + curr_mask = curr_mask.unsqueeze(-1).expand(-1, -1, C) + + # Crop image and mask together + cropped_img = image[0, min_y:max_y, min_x:max_x, :] + cropped_mask = curr_mask[min_y:max_y, min_x:max_x, :] + + crop_h, crop_w = cropped_img.shape[0:2] + new_w = crop_w + new_h = crop_h + + if not preserve_size or crop_w > width or crop_h > height: + scale = min(width/crop_w, height/crop_h) + new_w = int(crop_w * scale) + new_h = int(crop_h * scale) + + # Resize RGB + resized_img = common_upscale(cropped_img.permute(2,0,1).unsqueeze(0), new_w, new_h, "lanczos", "disabled").squeeze(0).permute(1,2,0) + resized_mask = torch.nn.functional.interpolate( + cropped_mask.permute(2,0,1).unsqueeze(0), + size=(new_h, new_w), + mode='nearest' + ).squeeze(0).permute(1,2,0) + else: + resized_img = cropped_img + resized_mask = cropped_mask + + # Create empty tensors + new_img = torch.zeros((height, width, 3), dtype=image.dtype) + new_mask = torch.zeros((height, width), dtype=image.dtype) + + # Pad both + pad_x = (width - new_w) // 2 + pad_y = (height - new_h) // 2 + new_img[pad_y:pad_y+new_h, pad_x:pad_x+new_w, :] = resized_img + if len(resized_mask.shape) == 3: + resized_mask = resized_mask[:,:,0] # Take first channel if 3D + new_mask[pad_y:pad_y+new_h, pad_x:pad_x+new_w] = resized_mask + + output_images.append(new_img) + output_masks.append(new_mask) + + if not output_images: + return (torch.zeros((0, height, width, 3), dtype=image.dtype),) + + out_rgb = torch.stack(output_images, dim=0) + out_masks = torch.stack(output_masks, dim=0) + + # Apply mask to RGB + mask_expanded = out_masks.unsqueeze(-1).expand(-1, -1, -1, 3) + background_color = torch.tensor(bg_color, dtype=torch.float32, device=image.device) + out_rgb = out_rgb * mask_expanded + background_color * (1 - mask_expanded) + + return (out_rgb, out_masks) + +class ImagePadKJ: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "extra_padding": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "pad_mode": (["edge", "color"],), + "color": ("STRING", {"default": "0, 0, 0", "tooltip": "Color as RGB values in range 0-255, separated by commas."}), + }, + "optional": { + "mask": ("MASK", ), + "target_width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, 
"forceInput": True}), + "target_height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, "forceInput": True}), + } + } + + RETURN_TYPES = ("IMAGE", "MASK", ) + RETURN_NAMES = ("images", "masks",) + FUNCTION = "pad" + CATEGORY = "KJNodes/image" + DESCRIPTION = "Pad the input image and optionally mask with the specified padding." + + def pad(self, image, left, right, top, bottom, extra_padding, color, pad_mode, mask=None, target_width=None, target_height=None): + B, H, W, C = image.shape + + # Resize masks to image dimensions if necessary + if mask is not None: + BM, HM, WM = mask.shape + if HM != H or WM != W: + mask = F.interpolate(mask.unsqueeze(1), size=(H, W), mode='nearest-exact').squeeze(1) + + # Parse background color + bg_color = [int(x.strip())/255.0 for x in color.split(",")] + if len(bg_color) == 1: + bg_color = bg_color * 3 # Grayscale to RGB + bg_color = torch.tensor(bg_color, dtype=image.dtype, device=image.device) + + # Calculate padding sizes with extra padding + if target_width is not None and target_height is not None: + if extra_padding > 0: + image = common_upscale(image.movedim(-1, 1), W - extra_padding, H - extra_padding, "lanczos", "disabled").movedim(1, -1) + B, H, W, C = image.shape + + padded_width = target_width + padded_height = target_height + pad_left = (padded_width - W) // 2 + pad_right = padded_width - W - pad_left + pad_top = (padded_height - H) // 2 + pad_bottom = padded_height - H - pad_top + else: + pad_left = left + extra_padding + pad_right = right + extra_padding + pad_top = top + extra_padding + pad_bottom = bottom + extra_padding + + padded_width = W + pad_left + pad_right + padded_height = H + pad_top + pad_bottom + out_image = torch.zeros((B, padded_height, padded_width, C), dtype=image.dtype, device=image.device) + + # Fill padded areas + for b in range(B): + if pad_mode == "edge": + # Pad with edge color + # Define edge pixels + top_edge = image[b, 0, :, :] + bottom_edge = image[b, H-1, :, :] + left_edge = image[b, :, 0, :] + right_edge = image[b, :, W-1, :] + + # Fill borders with edge colors + out_image[b, :pad_top, :, :] = top_edge.mean(dim=0) + out_image[b, pad_top+H:, :, :] = bottom_edge.mean(dim=0) + out_image[b, :, :pad_left, :] = left_edge.mean(dim=0) + out_image[b, :, pad_left+W:, :] = right_edge.mean(dim=0) + out_image[b, pad_top:pad_top+H, pad_left:pad_left+W, :] = image[b] + else: + # Pad with specified background color + out_image[b, :, :, :] = bg_color.unsqueeze(0).unsqueeze(0) # Expand for H and W dimensions + out_image[b, pad_top:pad_top+H, pad_left:pad_left+W, :] = image[b] + + + if mask is not None: + out_masks = torch.nn.functional.pad( + mask, + (pad_left, pad_right, pad_top, pad_bottom), + mode='replicate' + ) + else: + out_masks = torch.ones((B, padded_height, padded_width), dtype=image.dtype, device=image.device) + for m in range(B): + out_masks[m, pad_top:pad_top+H, pad_left:pad_left+W] = 0.0 + + return (out_image, out_masks) + +# extends https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite +class LoadVideosFromFolder: + @classmethod + def __init__(cls): + try: + cls.vhs_nodes = importlib.import_module("ComfyUI-VideoHelperSuite.videohelpersuite") + except ImportError: + try: + cls.vhs_nodes = importlib.import_module("comfyui-videohelpersuite.videohelpersuite") + except ImportError: + raise ImportError("This node requires ComfyUI-VideoHelperSuite to be installed.") + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "video": ("STRING", {"default": "X://insert/path/"},), + "force_rate": 
("FLOAT", {"default": 0, "min": 0, "max": 60, "step": 1, "disable": 0}), + "custom_width": ("INT", {"default": 0, "min": 0, "max": 4096, 'disable': 0}), + "custom_height": ("INT", {"default": 0, "min": 0, "max": 4096, 'disable': 0}), + "frame_load_cap": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1, "disable": 0}), + "skip_first_frames": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1}), + "select_every_nth": ("INT", {"default": 1, "min": 1, "max": 1000, "step": 1}), + "output_type": (["batch", "grid"], {"default": "batch"}), + "grid_max_columns": ("INT", {"default": 4, "min": 1, "max": 16, "step": 1, "disable": 1}), + "add_label": ( "BOOLEAN", {"default": False} ), + }, + "hidden": { + "force_size": "STRING", + "unique_id": "UNIQUE_ID" + }, + } + + CATEGORY = "KJNodes/misc" + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("IMAGE", ) + + FUNCTION = "load_video" + + def load_video(self, output_type, grid_max_columns, add_label=False, **kwargs): + if self.vhs_nodes is None: + raise ImportError("This node requires ComfyUI-VideoHelperSuite to be installed.") + videos_list = [] + filenames = [] + for f in os.listdir(kwargs['video']): + if os.path.isfile(os.path.join(kwargs['video'], f)): + file_parts = f.split('.') + if len(file_parts) > 1 and (file_parts[-1].lower() in ['webm', 'mp4', 'mkv', 'gif', 'mov']): + videos_list.append(os.path.join(kwargs['video'], f)) + filenames.append(f) + print(videos_list) + kwargs.pop('video') + loaded_videos = [] + for idx, video in enumerate(videos_list): + video_tensor = self.vhs_nodes.load_video_nodes.load_video(video=video, **kwargs)[0] + if add_label: + # Add filename label above video (without extension) + if video_tensor.dim() == 4: + _, h, w, c = video_tensor.shape + else: + h, w, c = video_tensor.shape + # Remove extension from filename + label_text = filenames[idx].rsplit('.', 1)[0] + font_size = max(16, w // 20) + try: + font = ImageFont.truetype("arial.ttf", font_size) + except: + font = ImageFont.load_default() + dummy_img = Image.new("RGB", (w, 10), (0,0,0)) + draw = ImageDraw.Draw(dummy_img) + text_bbox = draw.textbbox((0,0), label_text, font=font) + extra_padding = max(12, font_size // 2) # More padding under the font + label_height = text_bbox[3] - text_bbox[1] + extra_padding + label_img = Image.new("RGB", (w, label_height), (0,0,0)) + draw = ImageDraw.Draw(label_img) + draw.text((w//2 - (text_bbox[2]-text_bbox[0])//2, 4), label_text, font=font, fill=(255,255,255)) + label_np = np.asarray(label_img).astype(np.float32) / 255.0 + label_tensor = torch.from_numpy(label_np) + if c == 1: + label_tensor = label_tensor.mean(dim=2, keepdim=True) + elif c == 4: + alpha = torch.ones((label_height, w, 1), dtype=label_tensor.dtype) + label_tensor = torch.cat([label_tensor, alpha], dim=2) + if video_tensor.dim() == 4: + label_tensor = label_tensor.unsqueeze(0).expand(video_tensor.shape[0], -1, -1, -1) + video_tensor = torch.cat([label_tensor, video_tensor], dim=1) + else: + video_tensor = torch.cat([label_tensor, video_tensor], dim=0) + loaded_videos.append(video_tensor) + if output_type == "batch": + out_tensor = torch.cat(loaded_videos) + elif output_type == "grid": + rows = (len(loaded_videos) + grid_max_columns - 1) // grid_max_columns + # Pad the last row if needed + total_slots = rows * grid_max_columns + while len(loaded_videos) < total_slots: + loaded_videos.append(torch.zeros_like(loaded_videos[0])) + # Create grid by rows + row_tensors = [] + for row_idx in range(rows): + start_idx = row_idx * grid_max_columns + end_idx = 
start_idx + grid_max_columns + row_videos = loaded_videos[start_idx:end_idx] + # Pad all videos in this row to the same height + heights = [v.shape[1] for v in row_videos] + max_height = max(heights) + padded_row_videos = [] + for v in row_videos: + pad_height = max_height - v.shape[1] + if pad_height > 0: + # Pad (frames, H, W, C) or (H, W, C) + if v.dim() == 4: + pad = (0,0, 0,0, 0,pad_height, 0,0) # (C,W,H,F) + v = torch.nn.functional.pad(v, (0,0,0,0,0,pad_height,0,0)) + else: + v = torch.nn.functional.pad(v, (0,0,0,0,pad_height,0)) + padded_row_videos.append(v) + row_tensor = torch.cat(padded_row_videos, dim=2) # Concatenate horizontally + row_tensors.append(row_tensor) + out_tensor = torch.cat(row_tensors, dim=1) # Concatenate rows vertically + print(out_tensor.shape) + return out_tensor, + + @classmethod + def IS_CHANGED(s, video, **kwargs): + if s.vhs_nodes is not None: + return s.vhs_nodes.utils.hash_path(video) + return None \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/nodes/intrinsic_lora_nodes.py b/custom_nodes/comfyui-kjnodes/nodes/intrinsic_lora_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..c8f125363836cc7721b4b61d100702594522d389 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/nodes/intrinsic_lora_nodes.py @@ -0,0 +1,115 @@ +import folder_paths +import os +import torch +import torch.nn.functional as F +from comfy.utils import ProgressBar, load_torch_file +import comfy.sample +from nodes import CLIPTextEncode + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +folder_paths.add_model_folder_path("intrinsic_loras", os.path.join(script_directory, "intrinsic_loras")) + +class Intrinsic_lora_sampling: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "lora_name": (folder_paths.get_filename_list("intrinsic_loras"), ), + "task": ( + [ + 'depth map', + 'surface normals', + 'albedo', + 'shading', + ], + { + "default": 'depth map' + }), + "text": ("STRING", {"multiline": True, "default": ""}), + "clip": ("CLIP", ), + "vae": ("VAE", ), + "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), + }, + "optional": { + "image": ("IMAGE",), + "optional_latent": ("LATENT",), + }, + } + + RETURN_TYPES = ("IMAGE", "LATENT",) + FUNCTION = "onestepsample" + CATEGORY = "KJNodes" + DESCRIPTION = """ +Sampler to use the intrinsic loras: +https://github.com/duxiaodan/intrinsic-lora +These LoRAs are tiny and thus included +with this node pack. 
+""" + + def onestepsample(self, model, lora_name, clip, vae, text, task, per_batch, image=None, optional_latent=None): + pbar = ProgressBar(3) + + if optional_latent is None: + image_list = [] + for start_idx in range(0, image.shape[0], per_batch): + sub_pixels = vae.vae_encode_crop_pixels(image[start_idx:start_idx+per_batch]) + image_list.append(vae.encode(sub_pixels[:,:,:,:3])) + sample = torch.cat(image_list, dim=0) + else: + sample = optional_latent["samples"] + noise = torch.zeros(sample.size(), dtype=sample.dtype, layout=sample.layout, device="cpu") + prompt = task + "," + text + positive, = CLIPTextEncode.encode(self, clip, prompt) + negative = positive #negative shouldn't do anything in this scenario + + pbar.update(1) + + #custom model sampling to pass latent through as it is + class X0_PassThrough(comfy.model_sampling.EPS): + def calculate_denoised(self, sigma, model_output, model_input): + return model_output + def calculate_input(self, sigma, noise): + return noise + sampling_base = comfy.model_sampling.ModelSamplingDiscrete + sampling_type = X0_PassThrough + + class ModelSamplingAdvanced(sampling_base, sampling_type): + pass + model_sampling = ModelSamplingAdvanced(model.model.model_config) + + #load lora + model_clone = model.clone() + lora_path = folder_paths.get_full_path("intrinsic_loras", lora_name) + lora = load_torch_file(lora_path, safe_load=True) + self.loaded_lora = (lora_path, lora) + + model_clone_with_lora = comfy.sd.load_lora_for_models(model_clone, None, lora, 1.0, 0)[0] + + model_clone_with_lora.add_object_patch("model_sampling", model_sampling) + + samples = {"samples": comfy.sample.sample(model_clone_with_lora, noise, 1, 1.0, "euler", "simple", positive, negative, sample, + denoise=1.0, disable_noise=True, start_step=0, last_step=1, + force_full_denoise=True, noise_mask=None, callback=None, disable_pbar=True, seed=None)} + pbar.update(1) + + decoded = [] + for start_idx in range(0, samples["samples"].shape[0], per_batch): + decoded.append(vae.decode(samples["samples"][start_idx:start_idx+per_batch])) + image_out = torch.cat(decoded, dim=0) + + pbar.update(1) + + if task == 'depth map': + imax = image_out.max() + imin = image_out.min() + image_out = (image_out-imin)/(imax-imin) + image_out = torch.max(image_out, dim=3, keepdim=True)[0].repeat(1, 1, 1, 3) + elif task == 'surface normals': + image_out = F.normalize(image_out * 2 - 1, dim=3) / 2 + 0.5 + image_out = 1.0 - image_out + else: + image_out = image_out.clamp(-1.,1.) + + return (image_out, samples,) \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/nodes/lora_nodes.py b/custom_nodes/comfyui-kjnodes/nodes/lora_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..fe9959c01bcad0a27641b0cd3ba13b53cfa9434e --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/nodes/lora_nodes.py @@ -0,0 +1,190 @@ +import torch +import comfy.model_management +import comfy.utils +import folder_paths +import os +import logging +from tqdm import tqdm + +device = comfy.model_management.get_torch_device() + +CLAMP_QUANTILE = 0.99 + +def extract_lora(diff, key, rank, algorithm, lora_type, lowrank_iters=7, adaptive_param=1.0): + """ + Extracts LoRA weights from a weight difference tensor using SVD. 
+ """ + conv2d = (len(diff.shape) == 4) + kernel_size = None if not conv2d else diff.size()[2:4] + conv2d_3x3 = conv2d and kernel_size != (1, 1) + out_dim, in_dim = diff.size()[0:2] + + if conv2d: + if conv2d_3x3: + diff = diff.flatten(start_dim=1) + else: + diff = diff.squeeze() + + diff_float = diff.float() + if algorithm == "svd_lowrank": + U, S, V = torch.svd_lowrank(diff_float, q=min(rank, in_dim, out_dim), niter=lowrank_iters) + U = U @ torch.diag(S) + Vh = V.t() + else: + #torch.linalg.svdvals() + U, S, Vh = torch.linalg.svd(diff_float) + # Flexible rank selection logic like locon: https://github.com/KohakuBlueleaf/LyCORIS/blob/main/tools/extract_locon.py + if "adaptive" in lora_type: + if lora_type == "adaptive_ratio": + min_s = torch.max(S) * adaptive_param + lora_rank = torch.sum(S > min_s).item() + elif lora_type == "adaptive_energy": + energy = torch.cumsum(S**2, dim=0) + total_energy = torch.sum(S**2) + threshold = adaptive_param * total_energy # e.g., adaptive_param=0.95 for 95% + lora_rank = torch.sum(energy < threshold).item() + 1 + elif lora_type == "adaptive_quantile": + s_cum = torch.cumsum(S, dim=0) + min_cum_sum = adaptive_param * torch.sum(S) + lora_rank = torch.sum(s_cum < min_cum_sum).item() + print(f"{key} Extracted LoRA rank: {lora_rank}") + else: + lora_rank = rank + + lora_rank = max(1, lora_rank) + lora_rank = min(out_dim, in_dim, lora_rank) + + U = U[:, :lora_rank] + S = S[:lora_rank] + U = U @ torch.diag(S) + Vh = Vh[:lora_rank, :] + + dist = torch.cat([U.flatten(), Vh.flatten()]) + if dist.numel() > 100_000: + # Sample 100,000 elements for quantile estimation + idx = torch.randperm(dist.numel(), device=dist.device)[:100_000] + dist_sample = dist[idx] + hi_val = torch.quantile(dist_sample, CLAMP_QUANTILE) + else: + hi_val = torch.quantile(dist, CLAMP_QUANTILE) + low_val = -hi_val + + U = U.clamp(low_val, hi_val) + Vh = Vh.clamp(low_val, hi_val) + if conv2d: + U = U.reshape(out_dim, lora_rank, 1, 1) + Vh = Vh.reshape(lora_rank, in_dim, kernel_size[0], kernel_size[1]) + return (U, Vh) + + +def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora_type, algorithm, lowrank_iters, out_dtype, bias_diff=False, adaptive_param=1.0): + comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True) + model_diff.model.diffusion_model.cpu() + sd = model_diff.model_state_dict(filter_prefix=prefix_model) + del model_diff + comfy.model_management.soft_empty_cache() + for k, v in sd.items(): + if isinstance(v, torch.Tensor): + sd[k] = v.cpu() + + # Get total number of keys to process for progress bar + total_keys = len([k for k in sd if k.endswith(".weight") or (bias_diff and k.endswith(".bias"))]) + + # Create progress bar + progress_bar = tqdm(total=total_keys, desc=f"Extracting LoRA ({prefix_lora.strip('.')})") + comfy_pbar = comfy.utils.ProgressBar(total_keys) + + for k in sd: + if k.endswith(".weight"): + weight_diff = sd[k] + if weight_diff.ndim == 5: + logging.info(f"Skipping 5D tensor for key {k}") #skip patch embed + progress_bar.update(1) + comfy_pbar.update(1) + continue + if lora_type != "full": + if weight_diff.ndim < 2: + if bias_diff: + output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().to(out_dtype).cpu() + progress_bar.update(1) + comfy_pbar.update(1) + continue + try: + out = extract_lora(weight_diff.to(device), k, rank, algorithm, lora_type, lowrank_iters=lowrank_iters, adaptive_param=adaptive_param) + output_sd["{}{}.lora_up.weight".format(prefix_lora, 
k[len(prefix_model):-7])] = out[0].contiguous().to(out_dtype).cpu() + output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().to(out_dtype).cpu() + except Exception as e: + logging.warning(f"Could not generate lora weights for key {k}, error {e}") + else: + output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().to(out_dtype).cpu() + + progress_bar.update(1) + comfy_pbar.update(1) + + elif bias_diff and k.endswith(".bias"): + output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = sd[k].contiguous().to(out_dtype).cpu() + progress_bar.update(1) + comfy_pbar.update(1) + progress_bar.close() + return output_sd + +class LoraExtractKJ: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "finetuned_model": ("MODEL",), + "original_model": ("MODEL",), + "filename_prefix": ("STRING", {"default": "loras/ComfyUI_extracted_lora"}), + "rank": ("INT", {"default": 8, "min": 1, "max": 4096, "step": 1}), + "lora_type": (["standard", "full", "adaptive_ratio", "adaptive_quantile", "adaptive_energy"],), + "algorithm": (["svd_linalg", "svd_lowrank"], {"default": "svd_linalg", "tooltip": "SVD algorithm to use, svd_lowrank is faster but less accurate."}), + "lowrank_iters": ("INT", {"default": 7, "min": 1, "max": 100, "step": 1, "tooltip": "The number of subspace iterations for lowrank SVD algorithm."}), + "output_dtype": (["fp16", "bf16", "fp32"], {"default": "fp16"}), + "bias_diff": ("BOOLEAN", {"default": True}), + "adaptive_param": ("FLOAT", {"default": 0.15, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "For ratio mode, this is the ratio of the maximum singular value. For quantile mode, this is the quantile of the singular values."}), + }, + + } + RETURN_TYPES = () + FUNCTION = "save" + OUTPUT_NODE = True + + CATEGORY = "KJNodes/lora" + + def save(self, finetuned_model, original_model, filename_prefix, rank, lora_type, algorithm, lowrank_iters, output_dtype, bias_diff, adaptive_param): + if algorithm == "svd_lowrank" and lora_type != "standard": + raise ValueError("svd_lowrank algorithm is only supported for standard LoRA extraction.") + + dtype = {"fp8_e4m3fn": torch.float8_e4m3fn, "bf16": torch.bfloat16, "fp16": torch.float16, "fp16_fast": torch.float16, "fp32": torch.float32}[output_dtype] + m = finetuned_model.clone() + kp = original_model.get_key_patches("diffusion_model.") + for k in kp: + m.add_patches({k: kp[k]}, - 1.0, 1.0) + model_diff = m + + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + + output_sd = {} + if model_diff is not None: + output_sd = calc_lora_model(model_diff, rank, "diffusion_model.", "diffusion_model.", output_sd, lora_type, algorithm, lowrank_iters, dtype, bias_diff=bias_diff, adaptive_param=adaptive_param) + if "adaptive" in lora_type: + rank_str = f"{lora_type}_{adaptive_param:.2f}" + else: + rank_str = rank + output_checkpoint = f"{filename}_rank_{rank_str}_{output_dtype}_{counter:05}_.safetensors" + output_checkpoint = os.path.join(full_output_folder, output_checkpoint) + + comfy.utils.save_torch_file(output_sd, output_checkpoint, metadata=None) + return {} + +NODE_CLASS_MAPPINGS = { + "LoraExtractKJ": LoraExtractKJ +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "LoraExtractKJ": "LoraExtractKJ" +} diff --git a/custom_nodes/comfyui-kjnodes/nodes/mask_nodes.py 
b/custom_nodes/comfyui-kjnodes/nodes/mask_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..9a84a127dd618ebaabb2e9e522bf84594d9549e1 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/nodes/mask_nodes.py @@ -0,0 +1,1427 @@ +import torch +import torch.nn.functional as F +from torchvision.transforms import functional as TF +from PIL import Image, ImageDraw, ImageFilter, ImageFont +import scipy.ndimage +import numpy as np +from contextlib import nullcontext +import os + +from comfy import model_management +from comfy.utils import ProgressBar +from comfy.utils import common_upscale +from nodes import MAX_RESOLUTION + +import folder_paths + +from ..utility.utility import tensor2pil, pil2tensor + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +class BatchCLIPSeg: + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + + return {"required": + { + "images": ("IMAGE",), + "text": ("STRING", {"multiline": False}), + "threshold": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 10.0, "step": 0.001}), + "binary_mask": ("BOOLEAN", {"default": True}), + "combine_mask": ("BOOLEAN", {"default": False}), + "use_cuda": ("BOOLEAN", {"default": True}), + }, + "optional": + { + "blur_sigma": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}), + "opt_model": ("CLIPSEGMODEL", ), + "prev_mask": ("MASK", {"default": None}), + "image_bg_level": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "invert": ("BOOLEAN", {"default": False}), + } + } + + CATEGORY = "KJNodes/masking" + RETURN_TYPES = ("MASK", "IMAGE", ) + RETURN_NAMES = ("Mask", "Image", ) + FUNCTION = "segment_image" + DESCRIPTION = """ +Segments an image or batch of images using CLIPSeg. +""" + + def segment_image(self, images, text, threshold, binary_mask, combine_mask, use_cuda, blur_sigma=0.0, opt_model=None, prev_mask=None, invert= False, image_bg_level=0.5): + from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation + import torchvision.transforms as transforms + offload_device = model_management.unet_offload_device() + device = model_management.get_torch_device() + if not use_cuda: + device = torch.device("cpu") + dtype = model_management.unet_dtype() + + if opt_model is None: + checkpoint_path = os.path.join(folder_paths.models_dir,'clip_seg', 'clipseg-rd64-refined-fp16') + if not hasattr(self, "model"): + try: + if not os.path.exists(checkpoint_path): + from huggingface_hub import snapshot_download + snapshot_download(repo_id="Kijai/clipseg-rd64-refined-fp16", local_dir=checkpoint_path, local_dir_use_symlinks=False) + self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path) + except: + checkpoint_path = "CIDAS/clipseg-rd64-refined" + self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path) + processor = CLIPSegProcessor.from_pretrained(checkpoint_path) + + else: + self.model = opt_model['model'] + processor = opt_model['processor'] + + self.model.to(dtype).to(device) + + B, H, W, C = images.shape + images = images.to(device) + + autocast_condition = (dtype != torch.float32) and not model_management.is_device_mps(device) + with torch.autocast(model_management.get_autocast_device(device), dtype=dtype) if autocast_condition else nullcontext(): + + PIL_images = [Image.fromarray(np.clip(255. 
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) for image in images ] + prompt = [text] * len(images) + input_prc = processor(text=prompt, images=PIL_images, return_tensors="pt") + + for key in input_prc: + input_prc[key] = input_prc[key].to(device) + outputs = self.model(**input_prc) + + mask_tensor = torch.sigmoid(outputs.logits) + mask_tensor = (mask_tensor - mask_tensor.min()) / (mask_tensor.max() - mask_tensor.min()) + mask_tensor = torch.where(mask_tensor > (threshold), mask_tensor, torch.tensor(0, dtype=torch.float)) + print(mask_tensor.shape) + if len(mask_tensor.shape) == 2: + mask_tensor = mask_tensor.unsqueeze(0) + mask_tensor = F.interpolate(mask_tensor.unsqueeze(1), size=(H, W), mode='nearest') + mask_tensor = mask_tensor.squeeze(1) + + self.model.to(offload_device) + + if binary_mask: + mask_tensor = (mask_tensor > 0).float() + if blur_sigma > 0: + kernel_size = int(6 * int(blur_sigma) + 1) + blur = transforms.GaussianBlur(kernel_size=(kernel_size, kernel_size), sigma=(blur_sigma, blur_sigma)) + mask_tensor = blur(mask_tensor) + + if combine_mask: + mask_tensor = torch.max(mask_tensor, dim=0)[0] + mask_tensor = mask_tensor.unsqueeze(0).repeat(len(images),1,1) + + del outputs + model_management.soft_empty_cache() + + if prev_mask is not None: + if prev_mask.shape != mask_tensor.shape: + prev_mask = F.interpolate(prev_mask.unsqueeze(1), size=(H, W), mode='nearest').squeeze(1) + mask_tensor = mask_tensor + prev_mask.to(device) + mask_tensor = torch.clamp(mask_tensor, min=0.0, max=1.0) + + if invert: + mask_tensor = 1 - mask_tensor + + image_tensor = images * mask_tensor.unsqueeze(-1) + (1 - mask_tensor.unsqueeze(-1)) * image_bg_level + image_tensor = torch.clamp(image_tensor, min=0.0, max=1.0).cpu().float() + + mask_tensor = mask_tensor.cpu().float() + + return mask_tensor, image_tensor, + +class DownloadAndLoadCLIPSeg: + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + + return {"required": + { + "model": ( + [ 'Kijai/clipseg-rd64-refined-fp16', + 'CIDAS/clipseg-rd64-refined', + ], + ), + }, + } + + CATEGORY = "KJNodes/masking" + RETURN_TYPES = ("CLIPSEGMODEL",) + RETURN_NAMES = ("clipseg_model",) + FUNCTION = "segment_image" + DESCRIPTION = """ +Downloads and loads a CLIPSeg model with huggingface_hub +into ComfyUI/models/clip_seg. +""" + + def segment_image(self, model): + from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation + checkpoint_path = os.path.join(folder_paths.models_dir,'clip_seg', os.path.basename(model)) + if not hasattr(self, "model"): + if not os.path.exists(checkpoint_path): + from huggingface_hub import snapshot_download + snapshot_download(repo_id=model, local_dir=checkpoint_path, local_dir_use_symlinks=False) + self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path) + + processor = CLIPSegProcessor.from_pretrained(checkpoint_path) + + clipseg_model = {} + clipseg_model['model'] = self.model + clipseg_model['processor'] = processor + + return clipseg_model, + +class CreateTextMask: + + RETURN_TYPES = ("IMAGE", "MASK",) + FUNCTION = "createtextmask" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Creates a text image and mask. +Looks for fonts in this folder: +ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts + +If start_rotation and end_rotation differ, +an animation is created between them.
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + "text_x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "text_y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "font_size": ("INT", {"default": 32,"min": 8, "max": 4096, "step": 1}), + "font_color": ("STRING", {"default": "white"}), + "text": ("STRING", {"default": "HELLO!", "multiline": True}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "start_rotation": ("INT", {"default": 0,"min": 0, "max": 359, "step": 1}), + "end_rotation": ("INT", {"default": 0,"min": -359, "max": 359, "step": 1}), + }, + } + + def createtextmask(self, frames, width, height, invert, text_x, text_y, text, font_size, font_color, font, start_rotation, end_rotation): + # Define the number of images in the batch + batch_size = frames + out = [] + masks = [] + rotation = start_rotation + if start_rotation != end_rotation: + rotation_increment = (end_rotation - start_rotation) / (batch_size - 1) + + font_path = folder_paths.get_full_path("kjnodes_fonts", font) + # Generate the text + for i in range(batch_size): + image = Image.new("RGB", (width, height), "black") + draw = ImageDraw.Draw(image) + font = ImageFont.truetype(font_path, font_size) + + # Split the text into words + words = text.split() + + # Initialize variables for line creation + lines = [] + current_line = [] + current_line_width = 0 + try: #new pillow + # Iterate through words to create lines + for word in words: + word_width = font.getbbox(word)[2] + if current_line_width + word_width <= width - 2 * text_x: + current_line.append(word) + current_line_width += word_width + font.getbbox(" ")[2] # Add space width + else: + lines.append(" ".join(current_line)) + current_line = [word] + current_line_width = word_width + except: #old pillow + for word in words: + word_width = font.getsize(word)[0] + if current_line_width + word_width <= width - 2 * text_x: + current_line.append(word) + current_line_width += word_width + font.getsize(" ")[0] # Add space width + else: + lines.append(" ".join(current_line)) + current_line = [word] + current_line_width = word_width + + # Add the last line if it's not empty + if current_line: + lines.append(" ".join(current_line)) + + # Draw each line of text separately + y_offset = text_y + for line in lines: + text_width = font.getlength(line) + text_height = font_size + text_center_x = text_x + text_width / 2 + text_center_y = y_offset + text_height / 2 + try: + draw.text((text_x, y_offset), line, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, y_offset), line, font=font, fill=font_color) + y_offset += text_height # Move to the next line + + if start_rotation != end_rotation: + image = image.rotate(rotation, center=(text_center_x, text_center_y)) + rotation += rotation_increment + + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + mask = image[:, :, :, 0] + masks.append(mask) + out.append(image) + + if invert: + return (1.0 - torch.cat(out, dim=0), 1.0 - torch.cat(masks, dim=0),) + return (torch.cat(out, dim=0),torch.cat(masks, dim=0),) + +class ColorToMask: + + RETURN_TYPES = ("MASK",) + FUNCTION = "clip" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Converts chosen RGB value 
to a mask. +With batch inputs, the **per_batch** +controls the number of images processed at once. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "invert": ("BOOLEAN", {"default": False}), + "red": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "green": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "blue": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "threshold": ("INT", {"default": 10,"min": 0, "max": 255, "step": 1}), + "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), + }, + } + + def clip(self, images, red, green, blue, threshold, invert, per_batch): + + color = torch.tensor([red, green, blue], dtype=torch.uint8) + black = torch.tensor([0, 0, 0], dtype=torch.uint8) + white = torch.tensor([255, 255, 255], dtype=torch.uint8) + + if invert: + black, white = white, black + + steps = images.shape[0] + pbar = ProgressBar(steps) + tensors_out = [] + + for start_idx in range(0, images.shape[0], per_batch): + + # Calculate color distances + color_distances = torch.norm(images[start_idx:start_idx+per_batch] * 255 - color, dim=-1) + + # Create a mask based on the threshold + mask = color_distances <= threshold + + # Apply the mask to create new images + mask_out = torch.where(mask.unsqueeze(-1), white, black).float() + mask_out = mask_out.mean(dim=-1) + + tensors_out.append(mask_out.cpu()) + batch_count = mask_out.shape[0] + pbar.update(batch_count) + + tensors_out = torch.cat(tensors_out, dim=0) + tensors_out = torch.clamp(tensors_out, min=0.0, max=1.0) + return tensors_out, + +class CreateFluidMask: + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "createfluidmask" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "inflow_count": ("INT", {"default": 3,"min": 0, "max": 255, "step": 1}), + "inflow_velocity": ("INT", {"default": 1,"min": 0, "max": 255, "step": 1}), + "inflow_radius": ("INT", {"default": 8,"min": 0, "max": 255, "step": 1}), + "inflow_padding": ("INT", {"default": 50,"min": 0, "max": 255, "step": 1}), + "inflow_duration": ("INT", {"default": 60,"min": 0, "max": 255, "step": 1}), + }, + } + #using code from https://github.com/GregTJ/stable-fluids + def createfluidmask(self, frames, width, height, invert, inflow_count, inflow_velocity, inflow_radius, inflow_padding, inflow_duration): + from ..utility.fluid import Fluid + try: + from scipy.special import erf + except: + from scipy.spatial import erf + out = [] + masks = [] + RESOLUTION = width, height + DURATION = frames + + INFLOW_PADDING = inflow_padding + INFLOW_DURATION = inflow_duration + INFLOW_RADIUS = inflow_radius + INFLOW_VELOCITY = inflow_velocity + INFLOW_COUNT = inflow_count + + print('Generating fluid solver, this may take some time.') + fluid = Fluid(RESOLUTION, 'dye') + + center = np.floor_divide(RESOLUTION, 2) + r = np.min(center) - INFLOW_PADDING + + points = np.linspace(-np.pi, np.pi, INFLOW_COUNT, endpoint=False) + points = tuple(np.array((np.cos(p), np.sin(p))) for p in points) + normals = tuple(-p for p in points) + points = tuple(r * p + center for p in points) + + inflow_velocity = np.zeros_like(fluid.velocity) + inflow_dye = np.zeros(fluid.shape) + for p, n in zip(points, normals): + mask 
= np.linalg.norm(fluid.indices - p[:, None, None], axis=0) <= INFLOW_RADIUS + inflow_velocity[:, mask] += n[:, None] * INFLOW_VELOCITY + inflow_dye[mask] = 1 + + + for f in range(DURATION): + print(f'Computing frame {f + 1} of {DURATION}.') + if f <= INFLOW_DURATION: + fluid.velocity += inflow_velocity + fluid.dye += inflow_dye + + curl = fluid.step()[1] + # Using the error function to make the contrast a bit higher. + # Any other sigmoid function e.g. smoothstep would work. + curl = (erf(curl * 2) + 1) / 4 + + color = np.dstack((curl, np.ones(fluid.shape), fluid.dye)) + color = (np.clip(color, 0, 1) * 255).astype('uint8') + image = np.array(color).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + mask = image[:, :, :, 0] + masks.append(mask) + out.append(image) + + if invert: + return (1.0 - torch.cat(out, dim=0),1.0 - torch.cat(masks, dim=0),) + return (torch.cat(out, dim=0),torch.cat(masks, dim=0),) + +class CreateAudioMask: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "createaudiomask" + CATEGORY = "KJNodes/deprecated" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 16,"min": 1, "max": 255, "step": 1}), + "scale": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 2.0, "step": 0.01}), + "audio_path": ("STRING", {"default": "audio.wav"}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + }, + } + + def createaudiomask(self, frames, width, height, invert, audio_path, scale): + try: + import librosa + except ImportError: + raise Exception("Can not import librosa. Install it with 'pip install librosa'") + batch_size = frames + out = [] + masks = [] + if audio_path == "audio.wav": #I don't know why relative path won't work otherwise... 
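+            # Likely because ComfyUI resolves bare relative paths against its own working
+            # directory rather than this file's folder, so the default path is anchored
+            # to script_directory before loading.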
+ audio_path = os.path.join(script_directory, audio_path) + audio, sr = librosa.load(audio_path) + spectrogram = np.abs(librosa.stft(audio)) + + for i in range(batch_size): + image = Image.new("RGB", (width, height), "black") + draw = ImageDraw.Draw(image) + frame = spectrogram[:, i] + circle_radius = int(height * np.mean(frame)) + circle_radius *= scale + circle_center = (width // 2, height // 2) # Calculate the center of the image + + draw.ellipse([(circle_center[0] - circle_radius, circle_center[1] - circle_radius), + (circle_center[0] + circle_radius, circle_center[1] + circle_radius)], + fill='white') + + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + mask = image[:, :, :, 0] + masks.append(mask) + out.append(image) + + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),torch.cat(masks, dim=0),) + +class CreateGradientMask: + + RETURN_TYPES = ("MASK",) + FUNCTION = "createmask" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + }, + } + def createmask(self, frames, width, height, invert): + # Define the number of images in the batch + batch_size = frames + out = [] + # Create an empty array to store the image batch + image_batch = np.zeros((batch_size, height, width), dtype=np.float32) + # Generate the black to white gradient for each image + for i in range(batch_size): + gradient = np.linspace(1.0, 0.0, width, dtype=np.float32) + time = i / frames # Calculate the time variable + offset_gradient = gradient - time # Offset the gradient values based on time + image_batch[i] = offset_gradient.reshape(1, -1) + output = torch.from_numpy(image_batch) + mask = output + out.append(mask) + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),) + +class CreateFadeMask: + + RETURN_TYPES = ("MASK",) + FUNCTION = "createfademask" + CATEGORY = "KJNodes/deprecated" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 2,"min": 2, "max": 10000, "step": 1}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), + "start_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}), + "midpoint_level": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 1.0, "step": 0.01}), + "end_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}), + "midpoint_frame": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + }, + } + + def createfademask(self, frames, width, height, invert, interpolation, start_level, midpoint_level, end_level, midpoint_frame): + def ease_in(t): + return t * t + + def ease_out(t): + return 1 - (1 - t) * (1 - t) + + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + batch_size = frames + out = [] + image_batch = np.zeros((batch_size, height, width), dtype=np.float32) + + if midpoint_frame == 0: + midpoint_frame = batch_size // 2 + + for i in range(batch_size): + if i <= midpoint_frame: + t = i / midpoint_frame + if interpolation == "ease_in": + t = ease_in(t) + elif interpolation == 
"ease_out": + t = ease_out(t) + elif interpolation == "ease_in_out": + t = ease_in_out(t) + color = start_level - t * (start_level - midpoint_level) + else: + t = (i - midpoint_frame) / (batch_size - midpoint_frame) + if interpolation == "ease_in": + t = ease_in(t) + elif interpolation == "ease_out": + t = ease_out(t) + elif interpolation == "ease_in_out": + t = ease_in_out(t) + color = midpoint_level - t * (midpoint_level - end_level) + + color = np.clip(color, 0, 255) + image = np.full((height, width), color, dtype=np.float32) + image_batch[i] = image + + output = torch.from_numpy(image_batch) + mask = output + out.append(mask) + + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),) + +class CreateFadeMaskAdvanced: + + RETURN_TYPES = ("MASK",) + FUNCTION = "createfademask" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Create a batch of masks interpolated between given frames and values. +Uses same syntax as Fizz' BatchValueSchedule. +First value is the frame index (not that this starts from 0, not 1) +and the second value inside the brackets is the float value of the mask in range 0.0 - 1.0 + +For example the default values: +0:(0.0) +7:(1.0) +15:(0.0) + +Would create a mask batch fo 16 frames, starting from black, +interpolating with the chosen curve to fully white at the 8th frame, +and interpolating from that to fully black at the 16th frame. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "points_string": ("STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": True}), + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 16,"min": 2, "max": 10000, "step": 1}), + "width": ("INT", {"default": 512,"min": 1, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 1, "max": 4096, "step": 1}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "none", "default_to_black"],), + }, + } + + def createfademask(self, frames, width, height, invert, points_string, interpolation): + def ease_in(t): + return t * t + + def ease_out(t): + return 1 - (1 - t) * (1 - t) + + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + # Parse the input string into a list of tuples + points = [] + points_string = points_string.rstrip(',\n') + for point_str in points_string.split(','): + frame_str, color_str = point_str.split(':') + frame = int(frame_str.strip()) + color = float(color_str.strip()[1:-1]) # Remove parentheses around color + points.append((frame, color)) + + # Check if the last frame is already in the points + if (interpolation != "default_to_black") and (len(points) == 0 or points[-1][0] != frames - 1): + # If not, add it with the color of the last specified frame + points.append((frames - 1, points[-1][1] if points else 0)) + + # Sort the points by frame number + points.sort(key=lambda x: x[0]) + + batch_size = frames + out = [] + image_batch = np.zeros((batch_size, height, width), dtype=np.float32) + + # Index of the next point to interpolate towards + next_point = 1 + + for i in range(batch_size): + while next_point < len(points) and i > points[next_point][0]: + next_point += 1 + + # Interpolate between the previous point and the next point + prev_point = next_point - 1 + + if interpolation == "none": + exact_match = False + for p in points: + if p[0] == i: # Exact frame match + color = p[1] + exact_match = True + break + if not exact_match: + color = points[prev_point][1] + + elif interpolation == "default_to_black": + exact_match = False + for p in 
points: + if p[0] == i: # Exact frame match + color = p[1] + exact_match = True + break + if not exact_match: + color = 0 + else: + t = (i - points[prev_point][0]) / (points[next_point][0] - points[prev_point][0]) + if interpolation == "ease_in": + t = ease_in(t) + elif interpolation == "ease_out": + t = ease_out(t) + elif interpolation == "ease_in_out": + t = ease_in_out(t) + elif interpolation == "linear": + pass # No need to modify `t` for linear interpolation + + color = points[prev_point][1] - t * (points[prev_point][1] - points[next_point][1]) + + color = np.clip(color, 0, 255) + image = np.full((height, width), color, dtype=np.float32) + image_batch[i] = image + + output = torch.from_numpy(image_batch) + mask = output + out.append(mask) + + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),) + +class CreateMagicMask: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createmagicmask" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "frames": ("INT", {"default": 16,"min": 2, "max": 4096, "step": 1}), + "depth": ("INT", {"default": 12,"min": 1, "max": 500, "step": 1}), + "distortion": ("FLOAT", {"default": 1.5,"min": 0.0, "max": 100.0, "step": 0.01}), + "seed": ("INT", {"default": 123,"min": 0, "max": 99999999, "step": 1}), + "transitions": ("INT", {"default": 1,"min": 1, "max": 20, "step": 1}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + }, + } + + def createmagicmask(self, frames, transitions, depth, distortion, seed, frame_width, frame_height): + from ..utility.magictex import coordinate_grid, random_transform, magic + import matplotlib.pyplot as plt + rng = np.random.default_rng(seed) + out = [] + coords = coordinate_grid((frame_width, frame_height)) + + # Calculate the number of frames for each transition + frames_per_transition = frames // transitions + + # Generate a base set of parameters + base_params = { + "coords": random_transform(coords, rng), + "depth": depth, + "distortion": distortion, + } + for t in range(transitions): + # Generate a second set of parameters that is at most max_diff away from the base parameters + params1 = base_params.copy() + params2 = base_params.copy() + + params1['coords'] = random_transform(coords, rng) + params2['coords'] = random_transform(coords, rng) + + for i in range(frames_per_transition): + # Compute the interpolation factor + alpha = i / frames_per_transition + + # Interpolate between the two sets of parameters + params = params1.copy() + params['coords'] = (1 - alpha) * params1['coords'] + alpha * params2['coords'] + + tex = magic(**params) + + dpi = frame_width / 10 + fig = plt.figure(figsize=(10, 10), dpi=dpi) + + ax = fig.add_subplot(111) + plt.subplots_adjust(left=0, right=1, bottom=0, top=1) + + ax.get_yaxis().set_ticks([]) + ax.get_xaxis().set_ticks([]) + ax.imshow(tex, aspect='auto') + + fig.canvas.draw() + img = np.array(fig.canvas.renderer._renderer) + + plt.close(fig) + + pil_img = Image.fromarray(img).convert("L") + mask = torch.tensor(np.array(pil_img)) / 255.0 + + out.append(mask) + + return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) + +class CreateShapeMask: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createshapemask" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Creates a mask or batch of masks 
with the specified shape. +Locations are center locations. +Grow value is the amount to grow the shape on each frame, creating animated masks. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "shape": ( + [ 'circle', + 'square', + 'triangle', + ], + { + "default": 'circle' + }), + "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + "location_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "location_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "grow": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "shape_width": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + "shape_height": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + }, + } + + def createshapemask(self, frames, frame_width, frame_height, location_x, location_y, shape_width, shape_height, grow, shape): + # Define the number of images in the batch + batch_size = frames + out = [] + color = "white" + for i in range(batch_size): + image = Image.new("RGB", (frame_width, frame_height), "black") + draw = ImageDraw.Draw(image) + + # Calculate the size for this frame and ensure it's not less than 0 + current_width = max(0, shape_width + i*grow) + current_height = max(0, shape_height + i*grow) + + if shape == 'circle' or shape == 'square': + # Define the bounding box for the shape + left_up_point = (location_x - current_width // 2, location_y - current_height // 2) + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) + two_points = [left_up_point, right_down_point] + + if shape == 'circle': + draw.ellipse(two_points, fill=color) + elif shape == 'square': + draw.rectangle(two_points, fill=color) + + elif shape == 'triangle': + # Define the points for the triangle + left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right + top_point = (location_x, location_y - current_height // 2) # top point + draw.polygon([top_point, left_up_point, right_down_point], fill=color) + + image = pil2tensor(image) + mask = image[:, :, :, 0] + out.append(mask) + outstack = torch.cat(out, dim=0) + return (outstack, 1.0 - outstack,) + +class CreateVoronoiMask: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createvoronoi" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "frames": ("INT", {"default": 16,"min": 2, "max": 4096, "step": 1}), + "num_points": ("INT", {"default": 15,"min": 1, "max": 4096, "step": 1}), + "line_width": ("INT", {"default": 4,"min": 1, "max": 4096, "step": 1}), + "speed": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 1.0, "step": 0.01}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + }, + } + + def createvoronoi(self, frames, num_points, line_width, speed, frame_width, frame_height): + from scipy.spatial import Voronoi + # Define the number of images in the batch + batch_size = frames + out = [] + + # Calculate aspect ratio + aspect_ratio = frame_width / frame_height + + # Create start and end points for each point, considering the aspect ratio + start_points = 
np.random.rand(num_points, 2) + start_points[:, 0] *= aspect_ratio + + end_points = np.random.rand(num_points, 2) + end_points[:, 0] *= aspect_ratio + + for i in range(batch_size): + # Interpolate the points' positions based on the current frame + t = (i * speed) / (batch_size - 1) # normalize to [0, 1] over the frames + t = np.clip(t, 0, 1) # ensure t is in [0, 1] + points = (1 - t) * start_points + t * end_points # lerp + + # Adjust points for aspect ratio + points[:, 0] *= aspect_ratio + + vor = Voronoi(points) + + # Create a blank image with a white background + fig, ax = plt.subplots() + plt.subplots_adjust(left=0, right=1, bottom=0, top=1) + ax.set_xlim([0, aspect_ratio]); ax.set_ylim([0, 1]) # adjust x limits + ax.axis('off') + ax.margins(0, 0) + fig.set_size_inches(aspect_ratio * frame_height/100, frame_height/100) # adjust figure size + ax.fill_between([0, 1], [0, 1], color='white') + + # Plot each Voronoi ridge + for simplex in vor.ridge_vertices: + simplex = np.asarray(simplex) + if np.all(simplex >= 0): + plt.plot(vor.vertices[simplex, 0], vor.vertices[simplex, 1], 'k-', linewidth=line_width) + + fig.canvas.draw() + img = np.array(fig.canvas.renderer._renderer) + + plt.close(fig) + + pil_img = Image.fromarray(img).convert("L") + mask = torch.tensor(np.array(pil_img)) / 255.0 + + out.append(mask) + + return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) + +class GetMaskSizeAndCount: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + }} + + RETURN_TYPES = ("MASK","INT", "INT", "INT",) + RETURN_NAMES = ("mask", "width", "height", "count",) + FUNCTION = "getsize" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Returns the width, height and batch size of the mask, +and passes it through unchanged. 
+ +""" + + def getsize(self, mask): + width = mask.shape[2] + height = mask.shape[1] + count = mask.shape[0] + return {"ui": { + "text": [f"{count}x{width}x{height}"]}, + "result": (mask, width, height, count) + } + +class GrowMaskWithBlur: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mask": ("MASK",), + "expand": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}), + "incremental_expandrate": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}), + "tapered_corners": ("BOOLEAN", {"default": True}), + "flip_input": ("BOOLEAN", {"default": False}), + "blur_radius": ("FLOAT", { + "default": 0.0, + "min": 0.0, + "max": 100, + "step": 0.1 + }), + "lerp_alpha": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "decay_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "fill_holes": ("BOOLEAN", {"default": False}), + }, + } + + CATEGORY = "KJNodes/masking" + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "expand_mask" + DESCRIPTION = """ +# GrowMaskWithBlur +- mask: Input mask or mask batch +- expand: Expand or contract mask or mask batch by a given amount +- incremental_expandrate: increase expand rate by a given amount per frame +- tapered_corners: use tapered corners +- flip_input: flip input mask +- blur_radius: value higher than 0 will blur the mask +- lerp_alpha: alpha value for interpolation between frames +- decay_factor: decay value for interpolation between frames +- fill_holes: fill holes in the mask (slow)""" + + def expand_mask(self, mask, expand, tapered_corners, flip_input, blur_radius, incremental_expandrate, lerp_alpha, decay_factor, fill_holes=False): + alpha = lerp_alpha + decay = decay_factor + if flip_input: + mask = 1.0 - mask + c = 0 if tapered_corners else 1 + kernel = np.array([[c, 1, c], + [1, 1, 1], + [c, 1, c]]) + growmask = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).cpu() + out = [] + previous_output = None + current_expand = expand + for m in growmask: + output = m.numpy().astype(np.float32) + for _ in range(abs(round(current_expand))): + if current_expand < 0: + output = scipy.ndimage.grey_erosion(output, footprint=kernel) + else: + output = scipy.ndimage.grey_dilation(output, footprint=kernel) + if current_expand < 0: + current_expand -= abs(incremental_expandrate) + else: + current_expand += abs(incremental_expandrate) + if fill_holes: + binary_mask = output > 0 + output = scipy.ndimage.binary_fill_holes(binary_mask) + output = output.astype(np.float32) * 255 + output = torch.from_numpy(output) + if alpha < 1.0 and previous_output is not None: + # Interpolate between the previous and current frame + output = alpha * output + (1 - alpha) * previous_output + if decay < 1.0 and previous_output is not None: + # Add the decayed previous output to the current frame + output += decay * previous_output + output = output / output.max() + previous_output = output + out.append(output) + + if blur_radius != 0: + # Convert the tensor list to PIL images, apply blur, and convert back + for idx, tensor in enumerate(out): + # Convert tensor to PIL image + pil_image = tensor2pil(tensor.cpu().detach())[0] + # Apply Gaussian blur + pil_image = pil_image.filter(ImageFilter.GaussianBlur(blur_radius)) + # Convert back to tensor + out[idx] = pil2tensor(pil_image) + blurred = torch.cat(out, dim=0) + return (blurred, 1.0 - blurred) + else: + return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) + 
+class MaskBatchMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + }, + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("masks",) + FUNCTION = "combine" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Creates an image batch from multiple masks. +You can set how many inputs the node has, +with the **inputcount** and clicking update. +""" + + def combine(self, inputcount, **kwargs): + mask = kwargs["mask_1"] + for c in range(1, inputcount): + new_mask = kwargs[f"mask_{c + 1}"] + if mask.shape[1:] != new_mask.shape[1:]: + new_mask = F.interpolate(new_mask.unsqueeze(1), size=(mask.shape[1], mask.shape[2]), mode="bicubic").squeeze(1) + mask = torch.cat((mask, new_mask), dim=0) + return (mask,) + +class OffsetMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "x": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "y": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "angle": ("INT", { "default": 0, "min": -360, "max": 360, "step": 1, "display": "number" }), + "duplication_factor": ("INT", { "default": 1, "min": 1, "max": 1000, "step": 1, "display": "number" }), + "roll": ("BOOLEAN", { "default": False }), + "incremental": ("BOOLEAN", { "default": False }), + "padding_mode": ( + [ + 'empty', + 'border', + 'reflection', + + ], { + "default": 'empty' + }), + } + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("mask",) + FUNCTION = "offset" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Offsets the mask by the specified amount. + - mask: Input mask or mask batch + - x: Horizontal offset + - y: Vertical offset + - angle: Angle in degrees + - roll: roll edge wrapping + - duplication_factor: Number of times to duplicate the mask to form a batch + - border padding_mode: Padding mode for the mask +""" + + def offset(self, mask, x, y, angle, roll=False, incremental=False, duplication_factor=1, padding_mode="empty"): + # Create duplicates of the mask batch + mask = mask.repeat(duplication_factor, 1, 1).clone() + + batch_size, height, width = mask.shape + + if angle != 0 and incremental: + for i in range(batch_size): + rotation_angle = angle * (i+1) + mask[i] = TF.rotate(mask[i].unsqueeze(0), rotation_angle).squeeze(0) + elif angle > 0: + for i in range(batch_size): + mask[i] = TF.rotate(mask[i].unsqueeze(0), angle).squeeze(0) + + if roll: + if incremental: + for i in range(batch_size): + shift_x = min(x*(i+1), width-1) + shift_y = min(y*(i+1), height-1) + if shift_x != 0: + mask[i] = torch.roll(mask[i], shifts=shift_x, dims=1) + if shift_y != 0: + mask[i] = torch.roll(mask[i], shifts=shift_y, dims=0) + else: + shift_x = min(x, width-1) + shift_y = min(y, height-1) + if shift_x != 0: + mask = torch.roll(mask, shifts=shift_x, dims=2) + if shift_y != 0: + mask = torch.roll(mask, shifts=shift_y, dims=1) + else: + + for i in range(batch_size): + if incremental: + temp_x = min(x * (i+1), width-1) + temp_y = min(y * (i+1), height-1) + else: + temp_x = min(x, width-1) + temp_y = min(y, height-1) + if temp_x > 0: + if padding_mode == 'empty': + mask[i] = torch.cat([torch.zeros((height, temp_x)), mask[i, :, :-temp_x]], dim=1) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, :, :-temp_x], (0, temp_x), mode=padding_mode) + elif temp_x < 0: + if padding_mode == 'empty': + mask[i] = torch.cat([mask[i, :, 
:temp_x], torch.zeros((height, -temp_x))], dim=1) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, :, -temp_x:], (temp_x, 0), mode=padding_mode) + + if temp_y > 0: + if padding_mode == 'empty': + mask[i] = torch.cat([torch.zeros((temp_y, width)), mask[i, :-temp_y, :]], dim=0) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, :-temp_y, :], (0, temp_y), mode=padding_mode) + elif temp_y < 0: + if padding_mode == 'empty': + mask[i] = torch.cat([mask[i, :temp_y, :], torch.zeros((-temp_y, width))], dim=0) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, -temp_y:, :], (temp_y, 0), mode=padding_mode) + + return mask, + +class RoundMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + }} + + RETURN_TYPES = ("MASK",) + FUNCTION = "round" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Rounds the mask or batch of masks to a binary mask. +RoundMask example + +""" + + def round(self, mask): + mask = mask.round() + return (mask,) + +class ResizeMask: + upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"] + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "keep_proportions": ("BOOLEAN", { "default": False }), + "upscale_method": (s.upscale_methods,), + "crop": (["disabled","center"],), + } + } + + RETURN_TYPES = ("MASK", "INT", "INT",) + RETURN_NAMES = ("mask", "width", "height",) + FUNCTION = "resize" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Resizes the mask or batch of masks to the specified width and height. +""" + + def resize(self, mask, width, height, keep_proportions, upscale_method,crop): + if keep_proportions: + _, oh, ow = mask.shape + width = ow if width == 0 else width + height = oh if height == 0 else height + ratio = min(width / ow, height / oh) + width = round(ow*ratio) + height = round(oh*ratio) + + if upscale_method == "lanczos": + out_mask = common_upscale(mask.unsqueeze(1).repeat(1, 3, 1, 1), width, height, upscale_method, crop=crop).movedim(1,-1)[:, :, :, 0] + else: + out_mask = common_upscale(mask.unsqueeze(1), width, height, upscale_method, crop=crop).squeeze(1) + + return(out_mask, out_mask.shape[2], out_mask.shape[1],) + +class RemapMaskRange: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}), + "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("mask",) + FUNCTION = "remap" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Sets new min and max values for the mask. 
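+The mask is first normalized by its own maximum and then scaled into the new range,
+so for example min 0.2 and max 0.8 turn a mask spanning 0.0 - 1.0 into one spanning 0.2 - 0.8.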
+""" + + def remap(self, mask, min, max): + + # Find the maximum value in the mask + mask_max = torch.max(mask) + + # If the maximum mask value is zero, avoid division by zero by setting it to 1 + mask_max = mask_max if mask_max > 0 else 1 + + # Scale the mask values to the new range defined by min and max + # The highest pixel value in the mask will be scaled to max + scaled_mask = (mask / mask_max) * (max - min) + min + + # Clamp the values to ensure they are within [0.0, 1.0] + scaled_mask = torch.clamp(scaled_mask, min=0.0, max=1.0) + + return (scaled_mask, ) + + +def get_mask_polygon(self, mask_np): + import cv2 + """Helper function to get polygon points from mask""" + # Find contours + contours, _ = cv2.findContours(mask_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + + if not contours: + return None + + # Get the largest contour + largest_contour = max(contours, key=cv2.contourArea) + + # Approximate polygon + epsilon = 0.02 * cv2.arcLength(largest_contour, True) + polygon = cv2.approxPolyDP(largest_contour, epsilon, True) + + return polygon.squeeze() + +import cv2 +class SeparateMasks: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mask": ("MASK", ), + "size_threshold_width" : ("INT", {"default": 256, "min": 0.0, "max": 4096, "step": 1}), + "size_threshold_height" : ("INT", {"default": 256, "min": 0.0, "max": 4096, "step": 1}), + "mode": (["convex_polygons", "area", "box"],), + "max_poly_points": ("INT", {"default": 8, "min": 3, "max": 32, "step": 1}), + + }, + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("mask",) + FUNCTION = "separate" + CATEGORY = "KJNodes/masking" + OUTPUT_NODE = True + DESCRIPTION = "Separates a mask into multiple masks based on the size of the connected components." + + def polygon_to_mask(self, polygon, shape): + mask = np.zeros((shape[0], shape[1]), dtype=np.uint8) # Fixed shape handling + + if len(polygon.shape) == 2: # Check if polygon points are valid + polygon = polygon.astype(np.int32) + cv2.fillPoly(mask, [polygon], 1) + return mask + + def get_mask_polygon(self, mask_np, max_points): + contours, _ = cv2.findContours(mask_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + if not contours: + return None + + largest_contour = max(contours, key=cv2.contourArea) + hull = cv2.convexHull(largest_contour) + + # Initialize with smaller epsilon for more points + perimeter = cv2.arcLength(hull, True) + epsilon = perimeter * 0.01 # Start smaller + + min_eps = perimeter * 0.001 # Much smaller minimum + max_eps = perimeter * 0.2 # Smaller maximum + + best_approx = None + best_diff = float('inf') + max_iterations = 20 + + #print(f"Target points: {max_points}, Perimeter: {perimeter}") + + for i in range(max_iterations): + curr_eps = (min_eps + max_eps) / 2 + approx = cv2.approxPolyDP(hull, curr_eps, True) + points_diff = len(approx) - max_points + + #print(f"Iteration {i}: points={len(approx)}, eps={curr_eps:.4f}") + + if abs(points_diff) < best_diff: + best_approx = approx + best_diff = abs(points_diff) + + if len(approx) > max_points: + min_eps = curr_eps * 1.1 # More gradual adjustment + elif len(approx) < max_points: + max_eps = curr_eps * 0.9 # More gradual adjustment + else: + return approx.squeeze() + + if abs(max_eps - min_eps) < perimeter * 0.0001: # Relative tolerance + break + + # If we didn't find exact match, return best approximation + return best_approx.squeeze() if best_approx is not None else hull.squeeze() + + def separate(self, mask: torch.Tensor, size_threshold_width: int, size_threshold_height: int, max_poly_points: 
int, mode: str): + from scipy.ndimage import label, center_of_mass + import numpy as np + + B, H, W = mask.shape + separated = [] + + mask = mask.round() + + for b in range(B): + mask_np = mask[b].cpu().numpy().astype(np.uint8) + structure = np.ones((3, 3), dtype=np.int8) + labeled, ncomponents = label(mask_np, structure=structure) + pbar = ProgressBar(ncomponents) + + for component in range(1, ncomponents + 1): + component_mask_np = (labeled == component).astype(np.uint8) + + rows = np.any(component_mask_np, axis=1) + cols = np.any(component_mask_np, axis=0) + y_min, y_max = np.where(rows)[0][[0, -1]] + x_min, x_max = np.where(cols)[0][[0, -1]] + + width = x_max - x_min + 1 + height = y_max - y_min + 1 + centroid_x = (x_min + x_max) / 2 # Calculate x centroid + print(f"Component {component}: width={width}, height={height}, x_pos={centroid_x}") + + if width >= size_threshold_width and height >= size_threshold_height: + if mode == "convex_polygons": + polygon = self.get_mask_polygon(component_mask_np, max_poly_points) + if polygon is not None: + poly_mask = self.polygon_to_mask(polygon, (H, W)) + poly_mask = torch.tensor(poly_mask, device=mask.device) + separated.append((centroid_x, poly_mask)) + elif mode == "box": + # Create bounding box mask + box_mask = np.zeros((H, W), dtype=np.uint8) + box_mask[y_min:y_max+1, x_min:x_max+1] = 1 + box_mask = torch.tensor(box_mask, device=mask.device) + separated.append((centroid_x, box_mask)) + else: + area_mask = torch.tensor(component_mask_np, device=mask.device) + separated.append((centroid_x, area_mask)) + pbar.update(1) + + if len(separated) > 0: + # Sort by x position and extract only the masks + separated.sort(key=lambda x: x[0]) + separated = [x[1] for x in separated] + out_masks = torch.stack(separated, dim=0) + return out_masks, + else: + return torch.empty((1, 64, 64), device=mask.device), + \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/nodes/model_optimization_nodes.py b/custom_nodes/comfyui-kjnodes/nodes/model_optimization_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f2e88d276d177a6db75dc53cff14e5b63b4542 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/nodes/model_optimization_nodes.py @@ -0,0 +1,1885 @@ +from comfy.ldm.modules import attention as comfy_attention +import logging +import comfy.model_patcher +import comfy.utils +import comfy.sd +import torch +import folder_paths +import comfy.model_management as mm +from comfy.cli_args import args +from typing import Optional, Tuple + + +sageattn_modes = ["disabled", "auto", "sageattn_qk_int8_pv_fp16_cuda", "sageattn_qk_int8_pv_fp16_triton", "sageattn_qk_int8_pv_fp8_cuda", "sageattn_qk_int8_pv_fp8_cuda++"] + +_initialized = False +_original_functions = {} + +if not _initialized: + _original_functions["orig_attention"] = comfy_attention.optimized_attention + _original_functions["original_patch_model"] = comfy.model_patcher.ModelPatcher.patch_model + _original_functions["original_load_lora_for_models"] = comfy.sd.load_lora_for_models + try: + _original_functions["original_qwen_forward"] = comfy.ldm.qwen_image.model.Attention.forward + except: + pass + _initialized = True + +class BaseLoaderKJ: + original_linear = None + cublas_patched = False + + @torch.compiler.disable() + def _patch_modules(self, patch_cublaslinear, sage_attention): + try: + from comfy.ldm.qwen_image.model import apply_rotary_emb + def qwen_sage_forward( + self, + hidden_states: torch.FloatTensor, # Image stream + encoder_hidden_states: torch.FloatTensor = None, # 
Text stream + encoder_hidden_states_mask: torch.FloatTensor = None, + attention_mask: Optional[torch.FloatTensor] = None, + image_rotary_emb: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + seq_txt = encoder_hidden_states.shape[1] + + img_query = self.to_q(hidden_states).unflatten(-1, (self.heads, -1)) + img_key = self.to_k(hidden_states).unflatten(-1, (self.heads, -1)) + img_value = self.to_v(hidden_states).unflatten(-1, (self.heads, -1)) + + txt_query = self.add_q_proj(encoder_hidden_states).unflatten(-1, (self.heads, -1)) + txt_key = self.add_k_proj(encoder_hidden_states).unflatten(-1, (self.heads, -1)) + txt_value = self.add_v_proj(encoder_hidden_states).unflatten(-1, (self.heads, -1)) + + img_query = self.norm_q(img_query) + img_key = self.norm_k(img_key) + txt_query = self.norm_added_q(txt_query) + txt_key = self.norm_added_k(txt_key) + + joint_query = torch.cat([txt_query, img_query], dim=1) + joint_key = torch.cat([txt_key, img_key], dim=1) + joint_value = torch.cat([txt_value, img_value], dim=1) + + joint_query = apply_rotary_emb(joint_query, image_rotary_emb) + joint_key = apply_rotary_emb(joint_key, image_rotary_emb) + + joint_query = joint_query.flatten(start_dim=2) + joint_key = joint_key.flatten(start_dim=2) + joint_value = joint_value.flatten(start_dim=2) + + joint_hidden_states = attention_sage(joint_query, joint_key, joint_value, self.heads, attention_mask) + + txt_attn_output = joint_hidden_states[:, :seq_txt, :] + img_attn_output = joint_hidden_states[:, seq_txt:, :] + + img_attn_output = self.to_out[0](img_attn_output) + img_attn_output = self.to_out[1](img_attn_output) + txt_attn_output = self.to_add_out(txt_attn_output) + + return img_attn_output, txt_attn_output + except: + print("Failed to patch QwenImage attention, Comfy not updated, skipping") + + from comfy.ops import disable_weight_init, CastWeightBiasOp, cast_bias_weight + + if sage_attention != "disabled": + print("Patching comfy attention to use sageattn") + from sageattention import sageattn + def set_sage_func(sage_attention): + if sage_attention == "auto": + def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"): + return sageattn(q, k, v, is_causal=is_causal, attn_mask=attn_mask, tensor_layout=tensor_layout) + return func + elif sage_attention == "sageattn_qk_int8_pv_fp16_cuda": + from sageattention import sageattn_qk_int8_pv_fp16_cuda + def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"): + return sageattn_qk_int8_pv_fp16_cuda(q, k, v, is_causal=is_causal, attn_mask=attn_mask, pv_accum_dtype="fp32", tensor_layout=tensor_layout) + return func + elif sage_attention == "sageattn_qk_int8_pv_fp16_triton": + from sageattention import sageattn_qk_int8_pv_fp16_triton + def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"): + return sageattn_qk_int8_pv_fp16_triton(q, k, v, is_causal=is_causal, attn_mask=attn_mask, tensor_layout=tensor_layout) + return func + elif sage_attention == "sageattn_qk_int8_pv_fp8_cuda": + from sageattention import sageattn_qk_int8_pv_fp8_cuda + def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"): + return sageattn_qk_int8_pv_fp8_cuda(q, k, v, is_causal=is_causal, attn_mask=attn_mask, pv_accum_dtype="fp32+fp32", tensor_layout=tensor_layout) + return func + elif sage_attention == "sageattn_qk_int8_pv_fp8_cuda++": + from sageattention import sageattn_qk_int8_pv_fp8_cuda + def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"): + return sageattn_qk_int8_pv_fp8_cuda(q, k, v, 
is_causal=is_causal, attn_mask=attn_mask, pv_accum_dtype="fp32+fp16", tensor_layout=tensor_layout) + return func + + sage_func = set_sage_func(sage_attention) + + @torch.compiler.disable() + def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False): + if skip_reshape: + b, _, _, dim_head = q.shape + tensor_layout="HND" + else: + b, _, dim_head = q.shape + dim_head //= heads + q, k, v = map( + lambda t: t.view(b, -1, heads, dim_head), + (q, k, v), + ) + tensor_layout="NHD" + if mask is not None: + # add a batch dimension if there isn't already one + if mask.ndim == 2: + mask = mask.unsqueeze(0) + # add a heads dimension if there isn't already one + if mask.ndim == 3: + mask = mask.unsqueeze(1) + out = sage_func(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout) + if tensor_layout == "HND": + if not skip_output_reshape: + out = ( + out.transpose(1, 2).reshape(b, -1, heads * dim_head) + ) + else: + if skip_output_reshape: + out = out.transpose(1, 2) + else: + out = out.reshape(b, -1, heads * dim_head) + return out + + comfy_attention.optimized_attention = attention_sage + comfy.ldm.hunyuan_video.model.optimized_attention = attention_sage + comfy.ldm.flux.math.optimized_attention = attention_sage + comfy.ldm.genmo.joint_model.asymm_models_joint.optimized_attention = attention_sage + comfy.ldm.cosmos.blocks.optimized_attention = attention_sage + comfy.ldm.wan.model.optimized_attention = attention_sage + try: + comfy.ldm.qwen_image.model.Attention.forward = qwen_sage_forward + except: + pass + + else: + print("Restoring initial comfy attention") + comfy_attention.optimized_attention = _original_functions.get("orig_attention") + comfy.ldm.hunyuan_video.model.optimized_attention = _original_functions.get("orig_attention") + comfy.ldm.flux.math.optimized_attention = _original_functions.get("orig_attention") + comfy.ldm.genmo.joint_model.asymm_models_joint.optimized_attention = _original_functions.get("orig_attention") + comfy.ldm.cosmos.blocks.optimized_attention = _original_functions.get("orig_attention") + comfy.ldm.wan.model.optimized_attention = _original_functions.get("orig_attention") + try: + comfy.ldm.qwen_image.model.Attention.forward = _original_functions.get("original_qwen_forward") + except: + pass + + if patch_cublaslinear: + if not BaseLoaderKJ.cublas_patched: + BaseLoaderKJ.original_linear = disable_weight_init.Linear + try: + from cublas_ops import CublasLinear + except ImportError: + raise Exception("Can't import 'torch-cublas-hgemm', install it from here https://github.com/aredden/torch-cublas-hgemm") + + class PatchedLinear(CublasLinear, CastWeightBiasOp): + def reset_parameters(self): + pass + + def forward_comfy_cast_weights(self, input): + weight, bias = cast_bias_weight(self, input) + return torch.nn.functional.linear(input, weight, bias) + + def forward(self, *args, **kwargs): + if self.comfy_cast_weights: + return self.forward_comfy_cast_weights(*args, **kwargs) + else: + return super().forward(*args, **kwargs) + + disable_weight_init.Linear = PatchedLinear + BaseLoaderKJ.cublas_patched = True + else: + if BaseLoaderKJ.cublas_patched: + disable_weight_init.Linear = BaseLoaderKJ.original_linear + BaseLoaderKJ.cublas_patched = False + + +from comfy.patcher_extension import CallbacksMP +class PathchSageAttentionKJ(BaseLoaderKJ): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "sage_attention": (sageattn_modes, {"default": False, "tooltip": "Global patch comfy 
attention to use sageattn, once patched to revert back to normal you would need to run this node again with disabled option."}), + }} + + RETURN_TYPES = ("MODEL", ) + FUNCTION = "patch" + DESCRIPTION = "Experimental node for patching attention mode. This doesn't use the model patching system and thus can't be disabled without running the node again with 'disabled' option." + EXPERIMENTAL = True + CATEGORY = "KJNodes/experimental" + + def patch(self, model, sage_attention): + model_clone = model.clone() + @torch.compiler.disable() + def patch_attention_enable(model): + self._patch_modules(False, sage_attention) + @torch.compiler.disable() + def patch_attention_disable(model): + self._patch_modules(False, "disabled") + + model_clone.add_callback(CallbacksMP.ON_PRE_RUN, patch_attention_enable) + model_clone.add_callback(CallbacksMP.ON_CLEANUP, patch_attention_disable) + + return model_clone, + +class CheckpointLoaderKJ(BaseLoaderKJ): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), {"tooltip": "The name of the checkpoint (model) to load."}), + "weight_dtype": (["default", "fp8_e4m3fn", "fp8_e4m3fn_fast", "fp8_e5m2", "fp16", "bf16", "fp32"],), + "compute_dtype": (["default", "fp16", "bf16", "fp32"], {"default": "default", "tooltip": "The compute dtype to use for the model."}), + "patch_cublaslinear": ("BOOLEAN", {"default": False, "tooltip": "Enable or disable the patching, won't take effect on already loaded models!"}), + "sage_attention": (sageattn_modes, {"default": False, "tooltip": "Patch comfy attention to use sageattn."}), + "enable_fp16_accumulation": ("BOOLEAN", {"default": False, "tooltip": "Enable torch.backends.cuda.matmul.allow_fp16_accumulation, requires pytorch 2.7.0 nightly."}), + }} + + RETURN_TYPES = ("MODEL", "CLIP", "VAE") + FUNCTION = "patch" + DESCRIPTION = "Experimental node for patching torch.nn.Linear with CublasLinear." 
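+    # Rough usage sketch ("model.safetensors" is a placeholder checkpoint name):
+    #   model, clip, vae = CheckpointLoaderKJ().patch("model.safetensors", "fp8_e4m3fn", "fp16",
+    #       patch_cublaslinear=False, sage_attention="disabled", enable_fp16_accumulation=False)
+    # which loads the weights as fp8_e4m3fn while running compute in fp16.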
+ EXPERIMENTAL = True + CATEGORY = "KJNodes/experimental" + + def patch(self, ckpt_name, weight_dtype, compute_dtype, patch_cublaslinear, sage_attention, enable_fp16_accumulation): + DTYPE_MAP = { + "fp8_e4m3fn": torch.float8_e4m3fn, + "fp8_e5m2": torch.float8_e5m2, + "fp16": torch.float16, + "bf16": torch.bfloat16, + "fp32": torch.float32 + } + model_options = {} + if dtype := DTYPE_MAP.get(weight_dtype): + model_options["dtype"] = dtype + print(f"Setting {ckpt_name} weight dtype to {dtype}") + + if weight_dtype == "fp8_e4m3fn_fast": + model_options["dtype"] = torch.float8_e4m3fn + model_options["fp8_optimizations"] = True + + ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name) + sd, metadata = comfy.utils.load_torch_file(ckpt_path, return_metadata=True) + + model, clip, vae = self.load_state_dict_guess_config( + sd, + output_vae=True, + output_clip=True, + embedding_directory=folder_paths.get_folder_paths("embeddings"), + metadata=metadata, + model_options=model_options) + + if dtype := DTYPE_MAP.get(compute_dtype): + model.set_model_compute_dtype(dtype) + model.force_cast_weights = False + print(f"Setting {ckpt_name} compute dtype to {dtype}") + + if enable_fp16_accumulation: + if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"): + torch.backends.cuda.matmul.allow_fp16_accumulation = True + else: + raise RuntimeError("Failed to set fp16 accumulation, this requires pytorch 2.7.0 nightly currently") + else: + if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"): + torch.backends.cuda.matmul.allow_fp16_accumulation = False + + def patch_attention(model): + self._patch_modules(patch_cublaslinear, sage_attention) + model.add_callback(CallbacksMP.ON_PRE_RUN,patch_attention) + return model, clip, vae + + def load_state_dict_guess_config(self, sd, output_vae=True, output_clip=True, embedding_directory=None, output_model=True, model_options={}, te_model_options={}, metadata=None): + from comfy.sd import load_diffusion_model_state_dict, model_detection, VAE, CLIP + clip = None + vae = None + model = None + model_patcher = None + + diffusion_model_prefix = model_detection.unet_prefix_from_state_dict(sd) + parameters = comfy.utils.calculate_parameters(sd, diffusion_model_prefix) + weight_dtype = comfy.utils.weight_dtype(sd, diffusion_model_prefix) + load_device = mm.get_torch_device() + + model_config = model_detection.model_config_from_unet(sd, diffusion_model_prefix, metadata=metadata) + if model_config is None: + logging.warning("Warning, This is not a checkpoint file, trying to load it as a diffusion model only.") + diffusion_model = load_diffusion_model_state_dict(sd, model_options={}) + if diffusion_model is None: + return None + return (diffusion_model, None, VAE(sd={}), None) # The VAE object is there to throw an exception if it's actually used' + + + unet_weight_dtype = list(model_config.supported_inference_dtypes) + if model_config.scaled_fp8 is not None: + weight_dtype = None + + model_config.custom_operations = model_options.get("custom_operations", None) + unet_dtype = model_options.get("dtype", model_options.get("weight_dtype", None)) + + if unet_dtype is None: + unet_dtype = mm.unet_dtype(model_params=parameters, supported_dtypes=unet_weight_dtype, weight_dtype=weight_dtype) + + manual_cast_dtype = mm.unet_manual_cast(unet_dtype, load_device, model_config.supported_inference_dtypes) + model_config.set_inference_dtype(unet_dtype, manual_cast_dtype) + + if output_model: + inital_load_device = mm.unet_inital_load_device(parameters, 
unet_dtype) + model = model_config.get_model(sd, diffusion_model_prefix, device=inital_load_device) + model.load_model_weights(sd, diffusion_model_prefix) + + if output_vae: + vae_sd = comfy.utils.state_dict_prefix_replace(sd, {k: "" for k in model_config.vae_key_prefix}, filter_keys=True) + vae_sd = model_config.process_vae_state_dict(vae_sd) + vae = VAE(sd=vae_sd, metadata=metadata) + + if output_clip: + clip_target = model_config.clip_target(state_dict=sd) + if clip_target is not None: + clip_sd = model_config.process_clip_state_dict(sd) + if len(clip_sd) > 0: + parameters = comfy.utils.calculate_parameters(clip_sd) + clip = CLIP(clip_target, embedding_directory=embedding_directory, tokenizer_data=clip_sd, parameters=parameters, model_options=te_model_options) + m, u = clip.load_sd(clip_sd, full_model=True) + if len(m) > 0: + m_filter = list(filter(lambda a: ".logit_scale" not in a and ".transformer.text_projection.weight" not in a, m)) + if len(m_filter) > 0: + logging.warning("clip missing: {}".format(m)) + else: + logging.debug("clip missing: {}".format(m)) + + if len(u) > 0: + logging.debug("clip unexpected {}:".format(u)) + else: + logging.warning("no CLIP/text encoder weights in checkpoint, the text encoder model will not be loaded.") + + left_over = sd.keys() + if len(left_over) > 0: + logging.debug("left over keys: {}".format(left_over)) + + if output_model: + model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=mm.unet_offload_device()) + if inital_load_device != torch.device("cpu"): + logging.info("loaded diffusion model directly to GPU") + mm.load_models_gpu([model_patcher], force_full_load=True) + + return (model_patcher, clip, vae) + +class DiffusionModelSelector(): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model_name": (folder_paths.get_filename_list("diffusion_models"), {"tooltip": "The name of the checkpoint (model) to load."}), + }, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("model_path",) + FUNCTION = "get_path" + DESCRIPTION = "Returns the path to the model as a string." + EXPERIMENTAL = True + CATEGORY = "KJNodes/experimental" + + def get_path(self, model_name): + model_path = folder_paths.get_full_path_or_raise("diffusion_models", model_name) + return (model_path,) + +class DiffusionModelLoaderKJ(BaseLoaderKJ): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model_name": (folder_paths.get_filename_list("diffusion_models"), {"tooltip": "The name of the checkpoint (model) to load."}), + "weight_dtype": (["default", "fp8_e4m3fn", "fp8_e4m3fn_fast", "fp8_e5m2", "fp16", "bf16", "fp32"],), + "compute_dtype": (["default", "fp16", "bf16", "fp32"], {"default": "default", "tooltip": "The compute dtype to use for the model."}), + "patch_cublaslinear": ("BOOLEAN", {"default": False, "tooltip": "Enable or disable the patching, won't take effect on already loaded models!"}), + "sage_attention": (sageattn_modes, {"default": False, "tooltip": "Patch comfy attention to use sageattn."}), + "enable_fp16_accumulation": ("BOOLEAN", {"default": False, "tooltip": "Enable torch.backends.cuda.matmul.allow_fp16_accumulation, requires pytorch 2.7.0 nightly."}), + }, + "optional": { + "extra_state_dict": ("STRING", {"forceInput": True, "tooltip": "The full path to an additional state dict to load, this will be merged with the main state dict. Useful for example to add VACE module to a WanVideoModel. 
You can use DiffusionModelSelector to easily get the path."}), + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch_and_load" + DESCRIPTION = "Node for patching torch.nn.Linear with CublasLinear." + EXPERIMENTAL = True + CATEGORY = "KJNodes/experimental" + + def patch_and_load(self, model_name, weight_dtype, compute_dtype, patch_cublaslinear, sage_attention, enable_fp16_accumulation, extra_state_dict=None): + DTYPE_MAP = { + "fp8_e4m3fn": torch.float8_e4m3fn, + "fp8_e5m2": torch.float8_e5m2, + "fp16": torch.float16, + "bf16": torch.bfloat16, + "fp32": torch.float32 + } + model_options = {} + if dtype := DTYPE_MAP.get(weight_dtype): + model_options["dtype"] = dtype + print(f"Setting {model_name} weight dtype to {dtype}") + + if weight_dtype == "fp8_e4m3fn_fast": + model_options["dtype"] = torch.float8_e4m3fn + model_options["fp8_optimizations"] = True + + if enable_fp16_accumulation: + if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"): + torch.backends.cuda.matmul.allow_fp16_accumulation = True + else: + raise RuntimeError("Failed to set fp16 accumulation, this requires pytorch 2.7.0 nightly currently") + else: + if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"): + torch.backends.cuda.matmul.allow_fp16_accumulation = False + + unet_path = folder_paths.get_full_path_or_raise("diffusion_models", model_name) + + sd = comfy.utils.load_torch_file(unet_path) + if extra_state_dict is not None: + extra_sd = comfy.utils.load_torch_file(extra_state_dict) + sd.update(extra_sd) + del extra_sd + + model = comfy.sd.load_diffusion_model_state_dict(sd, model_options=model_options) + if dtype := DTYPE_MAP.get(compute_dtype): + model.set_model_compute_dtype(dtype) + model.force_cast_weights = False + print(f"Setting {model_name} compute dtype to {dtype}") + + def patch_attention(model): + self._patch_modules(patch_cublaslinear, sage_attention) + model.add_callback(CallbacksMP.ON_PRE_RUN,patch_attention) + + return (model,) + +class ModelPatchTorchSettings: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "enable_fp16_accumulation": ("BOOLEAN", {"default": False, "tooltip": "Enable torch.backends.cuda.matmul.allow_fp16_accumulation, requires pytorch 2.7.0 nightly."}), + }} + + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + DESCRIPTION = "Adds callbacks to model to set torch settings before and after running the model." 
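+    # Sketch: ModelPatchTorchSettings().patch(model, True) returns a clone whose pre-run callback
+    # sets torch.backends.cuda.matmul.allow_fp16_accumulation = True and whose cleanup callback
+    # turns it off again; the flag only exists on recent PyTorch builds, hence the hasattr checks.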
+ EXPERIMENTAL = True + CATEGORY = "KJNodes/experimental" + + def patch(self, model, enable_fp16_accumulation): + model_clone = model.clone() + + def patch_enable_fp16_accum(model): + print("Patching torch settings: torch.backends.cuda.matmul.allow_fp16_accumulation = True") + torch.backends.cuda.matmul.allow_fp16_accumulation = True + def patch_disable_fp16_accum(model): + print("Patching torch settings: torch.backends.cuda.matmul.allow_fp16_accumulation = False") + torch.backends.cuda.matmul.allow_fp16_accumulation = False + + if enable_fp16_accumulation: + if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"): + model_clone.add_callback(CallbacksMP.ON_PRE_RUN, patch_enable_fp16_accum) + model_clone.add_callback(CallbacksMP.ON_CLEANUP, patch_disable_fp16_accum) + else: + raise RuntimeError("Failed to set fp16 accumulation, this requires pytorch 2.7.0 nightly currently") + else: + if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"): + model_clone.add_callback(CallbacksMP.ON_PRE_RUN, patch_disable_fp16_accum) + else: + raise RuntimeError("Failed to set fp16 accumulation, this requires pytorch 2.7.0 nightly currently") + + return (model_clone,) + +def patched_patch_model(self, device_to=None, lowvram_model_memory=0, load_weights=True, force_patch_weights=False): + with self.use_ejected(): + + device_to = mm.get_torch_device() + + full_load_override = getattr(self.model, "full_load_override", "auto") + if full_load_override in ["enabled", "disabled"]: + full_load = full_load_override == "enabled" + else: + full_load = lowvram_model_memory == 0 + + self.load(device_to, lowvram_model_memory=lowvram_model_memory, force_patch_weights=force_patch_weights, full_load=full_load) + + for k in self.object_patches: + old = comfy.utils.set_attr(self.model, k, self.object_patches[k]) + if k not in self.object_patches_backup: + self.object_patches_backup[k] = old + + self.inject_model() + return self.model + +def patched_load_lora_for_models(model, clip, lora, strength_model, strength_clip): + + patch_keys = list(model.object_patches_backup.keys()) + for k in patch_keys: + #print("backing up object patch: ", k) + comfy.utils.set_attr(model.model, k, model.object_patches_backup[k]) + + key_map = {} + if model is not None: + key_map = comfy.lora.model_lora_keys_unet(model.model, key_map) + if clip is not None: + key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map) + + lora = comfy.lora_convert.convert_lora(lora) + loaded = comfy.lora.load_lora(lora, key_map) + #print(temp_object_patches_backup) + + if model is not None: + new_modelpatcher = model.clone() + k = new_modelpatcher.add_patches(loaded, strength_model) + else: + k = () + new_modelpatcher = None + + if clip is not None: + new_clip = clip.clone() + k1 = new_clip.add_patches(loaded, strength_clip) + else: + k1 = () + new_clip = None + k = set(k) + k1 = set(k1) + for x in loaded: + if (x not in k) and (x not in k1): + print("NOT LOADED {}".format(x)) + + if patch_keys: + if hasattr(model.model, "compile_settings"): + compile_settings = getattr(model.model, "compile_settings") + print("compile_settings: ", compile_settings) + for k in patch_keys: + if "diffusion_model." 
in k: + # Remove the prefix to get the attribute path + key = k.replace('diffusion_model.', '') + attributes = key.split('.') + # Start with the diffusion_model object + block = model.get_model_object("diffusion_model") + # Navigate through the attributes to get to the block + for attr in attributes: + if attr.isdigit(): + block = block[int(attr)] + else: + block = getattr(block, attr) + # Compile the block + compiled_block = torch.compile(block, mode=compile_settings["mode"], dynamic=compile_settings["dynamic"], fullgraph=compile_settings["fullgraph"], backend=compile_settings["backend"]) + # Add the compiled block back as an object patch + model.add_object_patch(k, compiled_block) + return (new_modelpatcher, new_clip) + +class PatchModelPatcherOrder: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "patch_order": (["object_patch_first", "weight_patch_first"], {"default": "weight_patch_first", "tooltip": "Patch the comfy patch_model function to load weight patches (LoRAs) before compiling the model"}), + "full_load": (["enabled", "disabled", "auto"], {"default": "auto", "tooltip": "Disabling may help with memory issues when loading large models, when changing this you should probably force model reload to avoid issues!"}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "Patch the comfy patch_model function patching order, useful for torch.compile (used as object_patch) as it should come last if you want to use LoRAs with compile" + EXPERIMENTAL = True + + def patch(self, model, patch_order, full_load): + comfy.model_patcher.ModelPatcher.temp_object_patches_backup = {} + setattr(model.model, "full_load_override", full_load) + if patch_order == "weight_patch_first": + comfy.model_patcher.ModelPatcher.patch_model = patched_patch_model + comfy.sd.load_lora_for_models = patched_load_lora_for_models + else: + comfy.model_patcher.ModelPatcher.patch_model = _original_functions.get("original_patch_model") + comfy.sd.load_lora_for_models = _original_functions.get("original_load_lora_for_models") + + return model, + +class TorchCompileModelFluxAdvanced: + def __init__(self): + self._compiled = False + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "backend": (["inductor", "cudagraphs"],), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "double_blocks": ("STRING", {"default": "0-18", "multiline": True}), + "single_blocks": ("STRING", {"default": "0-37", "multiline": True}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + }, + "optional": { + "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}), + } + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + DEPRECATED = True + + def parse_blocks(self, blocks_str): + blocks = [] + for part in blocks_str.split(','): + part = part.strip() + if '-' in part: + start, end = map(int, part.split('-')) + blocks.extend(range(start, end + 1)) + else: + blocks.append(int(part)) + return blocks + + def patch(self, model, backend, mode, fullgraph, single_blocks, double_blocks, dynamic, dynamo_cache_size_limit): + single_block_list = self.parse_blocks(single_blocks) + double_block_list = 
self.parse_blocks(double_blocks) + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit + + if not self._compiled: + try: + for i, block in enumerate(diffusion_model.double_blocks): + if i in double_block_list: + #print("Compiling double_block", i) + m.add_object_patch(f"diffusion_model.double_blocks.{i}", torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend)) + for i, block in enumerate(diffusion_model.single_blocks): + if i in single_block_list: + #print("Compiling single block", i) + m.add_object_patch(f"diffusion_model.single_blocks.{i}", torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend)) + self._compiled = True + compile_settings = { + "backend": backend, + "mode": mode, + "fullgraph": fullgraph, + "dynamic": dynamic, + } + setattr(m.model, "compile_settings", compile_settings) + except: + raise RuntimeError("Failed to compile model") + + return (m, ) + # rest of the layers that are not patched + # diffusion_model.final_layer = torch.compile(diffusion_model.final_layer, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.guidance_in = torch.compile(diffusion_model.guidance_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.img_in = torch.compile(diffusion_model.img_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.time_in = torch.compile(diffusion_model.time_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.txt_in = torch.compile(diffusion_model.txt_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.vector_in = torch.compile(diffusion_model.vector_in, mode=mode, fullgraph=fullgraph, backend=backend) + +class TorchCompileModelFluxAdvancedV2: + def __init__(self): + self._compiled = False + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "backend": (["inductor", "cudagraphs"],), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "double_blocks": ("BOOLEAN", {"default": True, "tooltip": "Compile double blocks"}), + "single_blocks": ("BOOLEAN", {"default": True, "tooltip": "Compile single blocks"}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + }, + "optional": { + "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}), + } + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def patch(self, model, backend, mode, fullgraph, single_blocks, double_blocks, dynamic, dynamo_cache_size_limit): + from comfy_api.torch_helpers import set_torch_compile_wrapper + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit + + compile_key_list = [] + + try: + if double_blocks: + for i, block in enumerate(diffusion_model.double_blocks): + compile_key_list.append(f"diffusion_model.double_blocks.{i}") + if single_blocks: + for i, block in enumerate(diffusion_model.single_blocks): + compile_key_list.append(f"diffusion_model.single_blocks.{i}") + + set_torch_compile_wrapper(model=m, keys=compile_key_list, backend=backend, mode=mode, dynamic=dynamic, fullgraph=fullgraph) + except: + raise 
RuntimeError("Failed to compile model") + + return (m, ) + # rest of the layers that are not patched + # diffusion_model.final_layer = torch.compile(diffusion_model.final_layer, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.guidance_in = torch.compile(diffusion_model.guidance_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.img_in = torch.compile(diffusion_model.img_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.time_in = torch.compile(diffusion_model.time_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.txt_in = torch.compile(diffusion_model.txt_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.vector_in = torch.compile(diffusion_model.vector_in, mode=mode, fullgraph=fullgraph, backend=backend) + + +class TorchCompileModelHyVideo: + def __init__(self): + self._compiled = False + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "backend": (["inductor","cudagraphs"], {"default": "inductor"}), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}), + "compile_single_blocks": ("BOOLEAN", {"default": True, "tooltip": "Compile single blocks"}), + "compile_double_blocks": ("BOOLEAN", {"default": True, "tooltip": "Compile double blocks"}), + "compile_txt_in": ("BOOLEAN", {"default": False, "tooltip": "Compile txt_in layers"}), + "compile_vector_in": ("BOOLEAN", {"default": False, "tooltip": "Compile vector_in layers"}), + "compile_final_layer": ("BOOLEAN", {"default": False, "tooltip": "Compile final layer"}), + + }, + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def patch(self, model, backend, fullgraph, mode, dynamic, dynamo_cache_size_limit, compile_single_blocks, compile_double_blocks, compile_txt_in, compile_vector_in, compile_final_layer): + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit + if not self._compiled: + try: + if compile_single_blocks: + for i, block in enumerate(diffusion_model.single_blocks): + compiled_block = torch.compile(block, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch(f"diffusion_model.single_blocks.{i}", compiled_block) + if compile_double_blocks: + for i, block in enumerate(diffusion_model.double_blocks): + compiled_block = torch.compile(block, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch(f"diffusion_model.double_blocks.{i}", compiled_block) + if compile_txt_in: + compiled_block = torch.compile(diffusion_model.txt_in, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch("diffusion_model.txt_in", compiled_block) + if compile_vector_in: + compiled_block = torch.compile(diffusion_model.vector_in, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch("diffusion_model.vector_in", compiled_block) + if compile_final_layer: + compiled_block = torch.compile(diffusion_model.final_layer, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + 
m.add_object_patch("diffusion_model.final_layer", compiled_block) + self._compiled = True + compile_settings = { + "backend": backend, + "mode": mode, + "fullgraph": fullgraph, + "dynamic": dynamic, + } + setattr(m.model, "compile_settings", compile_settings) + except: + raise RuntimeError("Failed to compile model") + return (m, ) + +class TorchCompileModelWanVideo: + def __init__(self): + self._compiled = False + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "backend": (["inductor","cudagraphs"], {"default": "inductor"}), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}), + "compile_transformer_blocks_only": ("BOOLEAN", {"default": False, "tooltip": "Compile only transformer blocks"}), + }, + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + DEPRECATED = True + + def patch(self, model, backend, fullgraph, mode, dynamic, dynamo_cache_size_limit, compile_transformer_blocks_only): + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit + try: + if compile_transformer_blocks_only: + for i, block in enumerate(diffusion_model.blocks): + if hasattr(block, "_orig_mod"): + block = block._orig_mod + compiled_block = torch.compile(block, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch(f"diffusion_model.blocks.{i}", compiled_block) + else: + compiled_model = torch.compile(diffusion_model, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch("diffusion_model", compiled_model) + + compile_settings = { + "backend": backend, + "mode": mode, + "fullgraph": fullgraph, + "dynamic": dynamic, + } + setattr(m.model, "compile_settings", compile_settings) + except: + raise RuntimeError("Failed to compile model") + return (m, ) + +class TorchCompileModelWanVideoV2: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "backend": (["inductor","cudagraphs"], {"default": "inductor"}), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + "compile_transformer_blocks_only": ("BOOLEAN", {"default": True, "tooltip": "Compile only transformer blocks, faster compile and less error prone"}), + "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}), + }, + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def patch(self, model, backend, fullgraph, mode, dynamic, dynamo_cache_size_limit, compile_transformer_blocks_only): + from comfy_api.torch_helpers import set_torch_compile_wrapper + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit + try: + if compile_transformer_blocks_only: + compile_key_list = [] + for 
i, block in enumerate(diffusion_model.blocks): + compile_key_list.append(f"diffusion_model.blocks.{i}") + else: + compile_key_list =["diffusion_model"] + + set_torch_compile_wrapper(model=m, keys=compile_key_list, backend=backend, mode=mode, dynamic=dynamic, fullgraph=fullgraph) + except: + raise RuntimeError("Failed to compile model") + + return (m, ) + +class TorchCompileModelQwenImage: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "backend": (["inductor","cudagraphs"], {"default": "inductor"}), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + "compile_transformer_blocks_only": ("BOOLEAN", {"default": True, "tooltip": "Compile only transformer blocks, faster compile and less error prone"}), + "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}), + }, + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def patch(self, model, backend, fullgraph, mode, dynamic, dynamo_cache_size_limit, compile_transformer_blocks_only): + from comfy_api.torch_helpers import set_torch_compile_wrapper + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit + try: + if compile_transformer_blocks_only: + compile_key_list = [] + for i, block in enumerate(diffusion_model.transformer_blocks): + compile_key_list.append(f"diffusion_model.transformer_blocks.{i}") + else: + compile_key_list =["diffusion_model"] + + set_torch_compile_wrapper(model=m, keys=compile_key_list, backend=backend, mode=mode, dynamic=dynamic, fullgraph=fullgraph) + except: + raise RuntimeError("Failed to compile model") + + return (m, ) + +class TorchCompileVAE: + def __init__(self): + self._compiled_encoder = False + self._compiled_decoder = False + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "vae": ("VAE",), + "backend": (["inductor", "cudagraphs"],), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "compile_encoder": ("BOOLEAN", {"default": True, "tooltip": "Compile encoder"}), + "compile_decoder": ("BOOLEAN", {"default": True, "tooltip": "Compile decoder"}), + }} + RETURN_TYPES = ("VAE",) + FUNCTION = "compile" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def compile(self, vae, backend, mode, fullgraph, compile_encoder, compile_decoder): + if compile_encoder: + if not self._compiled_encoder: + encoder_name = "encoder" + if hasattr(vae.first_stage_model, "taesd_encoder"): + encoder_name = "taesd_encoder" + + try: + setattr( + vae.first_stage_model, + encoder_name, + torch.compile( + getattr(vae.first_stage_model, encoder_name), + mode=mode, + fullgraph=fullgraph, + backend=backend, + ), + ) + self._compiled_encoder = True + except: + raise RuntimeError("Failed to compile model") + if compile_decoder: + if not self._compiled_decoder: + decoder_name = "decoder" + if hasattr(vae.first_stage_model, "taesd_decoder"): + decoder_name = "taesd_decoder" + + try: + setattr( + vae.first_stage_model, + decoder_name, + torch.compile( + getattr(vae.first_stage_model, 
decoder_name), + mode=mode, + fullgraph=fullgraph, + backend=backend, + ), + ) + self._compiled_decoder = True + except: + raise RuntimeError("Failed to compile model") + return (vae, ) + +class TorchCompileControlNet: + def __init__(self): + self._compiled= False + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "controlnet": ("CONTROL_NET",), + "backend": (["inductor", "cudagraphs"],), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + }} + RETURN_TYPES = ("CONTROL_NET",) + FUNCTION = "compile" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def compile(self, controlnet, backend, mode, fullgraph): + if not self._compiled: + try: + # for i, block in enumerate(controlnet.control_model.double_blocks): + # print("Compiling controlnet double_block", i) + # controlnet.control_model.double_blocks[i] = torch.compile(block, mode=mode, fullgraph=fullgraph, backend=backend) + controlnet.control_model = torch.compile(controlnet.control_model, mode=mode, fullgraph=fullgraph, backend=backend) + self._compiled = True + except: + self._compiled = False + raise RuntimeError("Failed to compile model") + + return (controlnet, ) + +class TorchCompileLTXModel: + def __init__(self): + self._compiled = False + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "backend": (["inductor", "cudagraphs"],), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def patch(self, model, backend, mode, fullgraph, dynamic): + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + + if not self._compiled: + try: + for i, block in enumerate(diffusion_model.transformer_blocks): + compiled_block = torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend) + m.add_object_patch(f"diffusion_model.transformer_blocks.{i}", compiled_block) + self._compiled = True + compile_settings = { + "backend": backend, + "mode": mode, + "fullgraph": fullgraph, + "dynamic": dynamic, + } + setattr(m.model, "compile_settings", compile_settings) + + except: + raise RuntimeError("Failed to compile model") + + return (m, ) + +class TorchCompileCosmosModel: + def __init__(self): + self._compiled = False + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "backend": (["inductor", "cudagraphs"],), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + "dynamo_cache_size_limit": ("INT", {"default": 64, "tooltip": "Set the dynamo cache size limit"}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def patch(self, model, backend, mode, fullgraph, dynamic, dynamo_cache_size_limit): + + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit + + if 
not self._compiled: + try: + for name, block in diffusion_model.blocks.items(): + #print(f"Compiling block {name}") + compiled_block = torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend) + m.add_object_patch(f"diffusion_model.blocks.{name}", compiled_block) + #diffusion_model.blocks[name] = compiled_block + + self._compiled = True + compile_settings = { + "backend": backend, + "mode": mode, + "fullgraph": fullgraph, + "dynamic": dynamic, + } + setattr(m.model, "compile_settings", compile_settings) + + except: + raise RuntimeError("Failed to compile model") + + return (m, ) + + +#teacache + +try: + from comfy.ldm.wan.model import sinusoidal_embedding_1d +except: + pass +from einops import repeat +from unittest.mock import patch +from contextlib import nullcontext +import numpy as np + +def relative_l1_distance(last_tensor, current_tensor): + l1_distance = torch.abs(last_tensor - current_tensor).mean() + norm = torch.abs(last_tensor).mean() + relative_l1_distance = l1_distance / norm + return relative_l1_distance.to(torch.float32) + +@torch.compiler.disable() +def tea_cache(self, x, e0, e, transformer_options): + #teacache for cond and uncond separately + rel_l1_thresh = transformer_options["rel_l1_thresh"] + + is_cond = True if transformer_options["cond_or_uncond"] == [0] else False + + should_calc = True + suffix = "cond" if is_cond else "uncond" + + # Init cache dict if not exists + if not hasattr(self, 'teacache_state'): + self.teacache_state = { + 'cond': {'accumulated_rel_l1_distance': 0, 'prev_input': None, + 'teacache_skipped_steps': 0, 'previous_residual': None}, + 'uncond': {'accumulated_rel_l1_distance': 0, 'prev_input': None, + 'teacache_skipped_steps': 0, 'previous_residual': None} + } + logging.info("\nTeaCache: Initialized") + + cache = self.teacache_state[suffix] + + if cache['prev_input'] is not None: + if transformer_options["coefficients"] == []: + temb_relative_l1 = relative_l1_distance(cache['prev_input'], e0) + curr_acc_dist = cache['accumulated_rel_l1_distance'] + temb_relative_l1 + else: + rescale_func = np.poly1d(transformer_options["coefficients"]) + curr_acc_dist = cache['accumulated_rel_l1_distance'] + rescale_func(((e-cache['prev_input']).abs().mean() / cache['prev_input'].abs().mean()).cpu().item()) + try: + if curr_acc_dist < rel_l1_thresh: + should_calc = False + cache['accumulated_rel_l1_distance'] = curr_acc_dist + else: + should_calc = True + cache['accumulated_rel_l1_distance'] = 0 + except: + should_calc = True + cache['accumulated_rel_l1_distance'] = 0 + + if transformer_options["coefficients"] == []: + cache['prev_input'] = e0.clone().detach() + else: + cache['prev_input'] = e.clone().detach() + + if not should_calc: + x += cache['previous_residual'].to(x.device) + cache['teacache_skipped_steps'] += 1 + #print(f"TeaCache: Skipping {suffix} step") + return should_calc, cache + +def teacache_wanvideo_vace_forward_orig(self, x, t, context, vace_context, vace_strength, clip_fea=None, freqs=None, transformer_options={}, **kwargs): + # embeddings + x = self.patch_embedding(x.float()).to(x.dtype) + grid_sizes = x.shape[2:] + x = x.flatten(2).transpose(1, 2) + + # time embeddings + e = self.time_embedding( + sinusoidal_embedding_1d(self.freq_dim, t).to(dtype=x[0].dtype)) + e0 = self.time_projection(e).unflatten(1, (6, self.dim)) + + # context + context = self.text_embedding(context) + + context_img_len = None + if clip_fea is not None: + if self.img_emb is not None: + context_clip = self.img_emb(clip_fea) # bs x 257 x dim + 
context = torch.concat([context_clip, context], dim=1) + context_img_len = clip_fea.shape[-2] + + orig_shape = list(vace_context.shape) + vace_context = vace_context.movedim(0, 1).reshape([-1] + orig_shape[2:]) + c = self.vace_patch_embedding(vace_context.float()).to(vace_context.dtype) + c = c.flatten(2).transpose(1, 2) + c = list(c.split(orig_shape[0], dim=0)) + + if not transformer_options: + raise RuntimeError("Can't access transformer_options, this requires ComfyUI nightly version from Mar 14, 2025 or later") + + teacache_enabled = transformer_options.get("teacache_enabled", False) + if not teacache_enabled: + should_calc = True + else: + should_calc, cache = tea_cache(self, x, e0, e, transformer_options) + + if should_calc: + original_x = x.clone().detach() + patches_replace = transformer_options.get("patches_replace", {}) + blocks_replace = patches_replace.get("dit", {}) + for i, block in enumerate(self.blocks): + if ("double_block", i) in blocks_replace: + def block_wrap(args): + out = {} + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len) + return out + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap, "transformer_options": transformer_options}) + x = out["img"] + else: + x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) + + ii = self.vace_layers_mapping.get(i, None) + if ii is not None: + for iii in range(len(c)): + c_skip, c[iii] = self.vace_blocks[ii](c[iii], x=original_x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) + x += c_skip * vace_strength[iii] + del c_skip + + if teacache_enabled: + cache['previous_residual'] = (x - original_x).to(transformer_options["teacache_device"]) + + # head + x = self.head(x, e) + + # unpatchify + x = self.unpatchify(x, grid_sizes) + return x + +def teacache_wanvideo_forward_orig(self, x, t, context, clip_fea=None, freqs=None, transformer_options={}, **kwargs): + # embeddings + x = self.patch_embedding(x.float()).to(x.dtype) + grid_sizes = x.shape[2:] + x = x.flatten(2).transpose(1, 2) + + # time embeddings + e = self.time_embedding( + sinusoidal_embedding_1d(self.freq_dim, t).to(dtype=x[0].dtype)) + e0 = self.time_projection(e).unflatten(1, (6, self.dim)) + + # context + context = self.text_embedding(context) + + context_img_len = None + if clip_fea is not None: + if self.img_emb is not None: + context_clip = self.img_emb(clip_fea) # bs x 257 x dim + context = torch.concat([context_clip, context], dim=1) + context_img_len = clip_fea.shape[-2] + + + teacache_enabled = transformer_options.get("teacache_enabled", False) + if not teacache_enabled: + should_calc = True + else: + should_calc, cache = tea_cache(self, x, e0, e, transformer_options) + + if should_calc: + original_x = x.clone().detach() + patches_replace = transformer_options.get("patches_replace", {}) + blocks_replace = patches_replace.get("dit", {}) + for i, block in enumerate(self.blocks): + if ("double_block", i) in blocks_replace: + def block_wrap(args): + out = {} + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len) + return out + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap, "transformer_options": transformer_options}) + x = out["img"] + else: + x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) + + if 
teacache_enabled: + cache['previous_residual'] = (x - original_x).to(transformer_options["teacache_device"]) + + # head + x = self.head(x, e) + + # unpatchify + x = self.unpatchify(x, grid_sizes) + return x + +class WanVideoTeaCacheKJ: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "rel_l1_thresh": ("FLOAT", {"default": 0.275, "min": 0.0, "max": 10.0, "step": 0.001, "tooltip": "Threshold for to determine when to apply the cache, compromise between speed and accuracy. When using coefficients a good value range is something between 0.2-0.4 for all but 1.3B model, which should be about 10 times smaller, same as when not using coefficients."}), + "start_percent": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The start percentage of the steps to use with TeaCache."}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The end percentage of the steps to use with TeaCache."}), + "cache_device": (["main_device", "offload_device"], {"default": "offload_device", "tooltip": "Device to cache to"}), + "coefficients": (["disabled", "1.3B", "14B", "i2v_480", "i2v_720"], {"default": "i2v_480", "tooltip": "Coefficients for rescaling the relative l1 distance, if disabled the threshold value should be about 10 times smaller than the value used with coefficients."}), + } + } + + RETURN_TYPES = ("MODEL",) + RETURN_NAMES = ("model",) + FUNCTION = "patch_teacache" + CATEGORY = "KJNodes/teacache" + DESCRIPTION = """ +Patch WanVideo model to use TeaCache. Speeds up inference by caching the output and +applying it instead of doing the step. Best results are achieved by choosing the +appropriate coefficients for the model. Early steps should never be skipped, with too +aggressive values this can happen and the motion suffers. Starting later can help with that too. +When NOT using coefficients, the threshold value should be +about 10 times smaller than the value used with coefficients. + +Official recommended values https://github.com/ali-vilab/TeaCache/tree/main/TeaCache4Wan2.1: + + +
    ++-------------------+--------+---------+--------+
    +|       Model       |  Low   | Medium  |  High  |
    ++-------------------+--------+---------+--------+
    +| Wan2.1 t2v 1.3B   |  0.05  |  0.07   |  0.08  |
    +| Wan2.1 t2v 14B    |  0.14  |  0.15   |  0.20  |
    +| Wan2.1 i2v 480P   |  0.13  |  0.19   |  0.26  |
    +| Wan2.1 i2v 720P   |  0.18  |  0.20   |  0.30  |
    ++-------------------+--------+---------+--------+
    +
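    +How the cache decision works: the relative L1 distance of the timestep embedding
    +between consecutive steps, mean(|prev - curr|) / mean(|prev|), is accumulated
    +(rescaled by the selected coefficient polynomial when coefficients are enabled),
    +and while that accumulated value stays below rel_l1_thresh the step is skipped and
    +the cached residual from the last computed step is applied instead.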
    +""" + EXPERIMENTAL = True + + def patch_teacache(self, model, rel_l1_thresh, start_percent, end_percent, cache_device, coefficients): + if rel_l1_thresh == 0: + return (model,) + + if coefficients == "disabled" and rel_l1_thresh > 0.1: + logging.warning("Threshold value is too high for TeaCache without coefficients, consider using coefficients for better results.") + if coefficients != "disabled" and rel_l1_thresh < 0.1 and "1.3B" not in coefficients: + logging.warning("Threshold value is too low for TeaCache with coefficients, consider using higher threshold value for better results.") + + # type_str = str(type(model.model.model_config).__name__) + #if model.model.diffusion_model.dim == 1536: + # model_type ="1.3B" + # else: + # if "WAN21_T2V" in type_str: + # model_type = "14B" + # elif "WAN21_I2V" in type_str: + # model_type = "i2v_480" + # else: + # model_type = "i2v_720" #how to detect this? + + + teacache_coefficients_map = { + "disabled": [], + "1.3B": [2.39676752e+03, -1.31110545e+03, 2.01331979e+02, -8.29855975e+00, 1.37887774e-01], + "14B": [-5784.54975374, 5449.50911966, -1811.16591783, 256.27178429, -13.02252404], + "i2v_480": [-3.02331670e+02, 2.23948934e+02, -5.25463970e+01, 5.87348440e+00, -2.01973289e-01], + "i2v_720": [-114.36346466, 65.26524496, -18.82220707, 4.91518089, -0.23412683], + } + coefficients = teacache_coefficients_map[coefficients] + + teacache_device = mm.get_torch_device() if cache_device == "main_device" else mm.unet_offload_device() + + model_clone = model.clone() + if 'transformer_options' not in model_clone.model_options: + model_clone.model_options['transformer_options'] = {} + model_clone.model_options["transformer_options"]["rel_l1_thresh"] = rel_l1_thresh + model_clone.model_options["transformer_options"]["teacache_device"] = teacache_device + model_clone.model_options["transformer_options"]["coefficients"] = coefficients + diffusion_model = model_clone.get_model_object("diffusion_model") + + def outer_wrapper(start_percent, end_percent): + def unet_wrapper_function(model_function, kwargs): + input = kwargs["input"] + timestep = kwargs["timestep"] + c = kwargs["c"] + sigmas = c["transformer_options"]["sample_sigmas"] + cond_or_uncond = kwargs["cond_or_uncond"] + last_step = (len(sigmas) - 1) + + matched_step_index = (sigmas == timestep[0] ).nonzero() + if len(matched_step_index) > 0: + current_step_index = matched_step_index.item() + else: + for i in range(len(sigmas) - 1): + # walk from beginning of steps until crossing the timestep + if (sigmas[i] - timestep[0]) * (sigmas[i + 1] - timestep[0]) <= 0: + current_step_index = i + break + else: + current_step_index = 0 + + if current_step_index == 0: + if (len(cond_or_uncond) == 1 and cond_or_uncond[0] == 1) or len(cond_or_uncond) == 2: + if hasattr(diffusion_model, "teacache_state"): + delattr(diffusion_model, "teacache_state") + logging.info("\nResetting TeaCache state") + + current_percent = current_step_index / (len(sigmas) - 1) + c["transformer_options"]["current_percent"] = current_percent + if start_percent <= current_percent <= end_percent: + c["transformer_options"]["teacache_enabled"] = True + + forward_function = teacache_wanvideo_vace_forward_orig if hasattr(diffusion_model, "vace_layers") else teacache_wanvideo_forward_orig + context = patch.multiple( + diffusion_model, + forward_orig=forward_function.__get__(diffusion_model, diffusion_model.__class__) + ) + + with context: + out = model_function(input, timestep, **c) + if current_step_index+1 == last_step and hasattr(diffusion_model, 
"teacache_state"): + if len(cond_or_uncond) == 1 and cond_or_uncond[0] == 0: + skipped_steps_cond = diffusion_model.teacache_state["cond"]["teacache_skipped_steps"] + skipped_steps_uncond = diffusion_model.teacache_state["uncond"]["teacache_skipped_steps"] + logging.info("-----------------------------------") + logging.info(f"TeaCache skipped:") + logging.info(f"{skipped_steps_cond} cond steps") + logging.info(f"{skipped_steps_uncond} uncond step") + logging.info(f"out of {last_step} steps") + logging.info("-----------------------------------") + elif len(cond_or_uncond) == 2: + skipped_steps_cond = diffusion_model.teacache_state["uncond"]["teacache_skipped_steps"] + logging.info("-----------------------------------") + logging.info(f"TeaCache skipped:") + logging.info(f"{skipped_steps_cond} cond steps") + logging.info(f"out of {last_step} steps") + logging.info("-----------------------------------") + + return out + return unet_wrapper_function + + model_clone.set_model_unet_function_wrapper(outer_wrapper(start_percent=start_percent, end_percent=end_percent)) + + return (model_clone,) + + + + +from comfy.ldm.flux.math import apply_rope + +def modified_wan_self_attention_forward(self, x, freqs): + r""" + Args: + x(Tensor): Shape [B, L, num_heads, C / num_heads] + freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2] + """ + b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim + + # query, key, value function + def qkv_fn(x): + q = self.norm_q(self.q(x)).view(b, s, n, d) + k = self.norm_k(self.k(x)).view(b, s, n, d) + v = self.v(x).view(b, s, n * d) + return q, k, v + + q, k, v = qkv_fn(x) + + q, k = apply_rope(q, k, freqs) + + feta_scores = get_feta_scores(q, k, self.num_frames, self.enhance_weight) + + x = comfy.ldm.modules.attention.optimized_attention( + q.view(b, s, n * d), + k.view(b, s, n * d), + v, + heads=self.num_heads, + ) + + x = self.o(x) + + x *= feta_scores + + return x + +from einops import rearrange +def get_feta_scores(query, key, num_frames, enhance_weight): + img_q, img_k = query, key #torch.Size([2, 9216, 12, 128]) + + _, ST, num_heads, head_dim = img_q.shape + spatial_dim = ST / num_frames + spatial_dim = int(spatial_dim) + + query_image = rearrange( + img_q, "B (T S) N C -> (B S) N T C", T=num_frames, S=spatial_dim, N=num_heads, C=head_dim + ) + key_image = rearrange( + img_k, "B (T S) N C -> (B S) N T C", T=num_frames, S=spatial_dim, N=num_heads, C=head_dim + ) + + return feta_score(query_image, key_image, head_dim, num_frames, enhance_weight) + +def feta_score(query_image, key_image, head_dim, num_frames, enhance_weight): + scale = head_dim**-0.5 + query_image = query_image * scale + attn_temp = query_image @ key_image.transpose(-2, -1) # translate attn to float32 + attn_temp = attn_temp.to(torch.float32) + attn_temp = attn_temp.softmax(dim=-1) + + # Reshape to [batch_size * num_tokens, num_frames, num_frames] + attn_temp = attn_temp.reshape(-1, num_frames, num_frames) + + # Create a mask for diagonal elements + diag_mask = torch.eye(num_frames, device=attn_temp.device).bool() + diag_mask = diag_mask.unsqueeze(0).expand(attn_temp.shape[0], -1, -1) + + # Zero out diagonal elements + attn_wo_diag = attn_temp.masked_fill(diag_mask, 0) + + # Calculate mean for each token's attention matrix + # Number of off-diagonal elements per matrix is n*n - n + num_off_diag = num_frames * num_frames - num_frames + mean_scores = attn_wo_diag.sum(dim=(1, 2)) / num_off_diag + + enhance_scores = mean_scores.mean() * (num_frames + enhance_weight) + enhance_scores = 
enhance_scores.clamp(min=1) + return enhance_scores + +import types +class WanAttentionPatch: + def __init__(self, num_frames, weight): + self.num_frames = num_frames + self.enhance_weight = weight + + def __get__(self, obj, objtype=None): + # Create bound method with stored parameters + def wrapped_attention(self_module, *args, **kwargs): + self_module.num_frames = self.num_frames + self_module.enhance_weight = self.enhance_weight + return modified_wan_self_attention_forward(self_module, *args, **kwargs) + return types.MethodType(wrapped_attention, obj) + +class WanVideoEnhanceAVideoKJ: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "latent": ("LATENT", {"tooltip": "Only used to get the latent count"}), + "weight": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 10.0, "step": 0.001, "tooltip": "Strength of the enhance effect"}), + } + } + + RETURN_TYPES = ("MODEL",) + RETURN_NAMES = ("model",) + FUNCTION = "enhance" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "https://github.com/NUS-HPC-AI-Lab/Enhance-A-Video" + EXPERIMENTAL = True + + def enhance(self, model, weight, latent): + if weight == 0: + return (model,) + + num_frames = latent["samples"].shape[2] + + model_clone = model.clone() + if 'transformer_options' not in model_clone.model_options: + model_clone.model_options['transformer_options'] = {} + model_clone.model_options["transformer_options"]["enhance_weight"] = weight + diffusion_model = model_clone.get_model_object("diffusion_model") + + compile_settings = getattr(model.model, "compile_settings", None) + for idx, block in enumerate(diffusion_model.blocks): + patched_attn = WanAttentionPatch(num_frames, weight).__get__(block.self_attn, block.__class__) + if compile_settings is not None: + patched_attn = torch.compile(patched_attn, mode=compile_settings["mode"], dynamic=compile_settings["dynamic"], fullgraph=compile_settings["fullgraph"], backend=compile_settings["backend"]) + + model_clone.add_object_patch(f"diffusion_model.blocks.{idx}.self_attn.forward", patched_attn) + + return (model_clone,) + +def normalized_attention_guidance(self, query, context_positive, context_negative): + k_positive = self.norm_k(self.k(context_positive)) + v_positive = self.v(context_positive) + k_negative = self.norm_k(self.k(context_negative)) + v_negative = self.v(context_negative) + + x_positive = comfy.ldm.modules.attention.optimized_attention(query, k_positive, v_positive, heads=self.num_heads).flatten(2) + x_negative = comfy.ldm.modules.attention.optimized_attention(query, k_negative, v_negative, heads=self.num_heads).flatten(2) + + nag_guidance = x_positive * self.nag_scale - x_negative * (self.nag_scale - 1) + + norm_positive = torch.norm(x_positive, p=1, dim=-1, keepdim=True).expand_as(x_positive) + norm_guidance = torch.norm(nag_guidance, p=1, dim=-1, keepdim=True).expand_as(nag_guidance) + + scale = torch.nan_to_num(norm_guidance / norm_positive, nan=10.0) + + mask = scale > self.nag_tau + adjustment = (norm_positive * self.nag_tau) / (norm_guidance + 1e-7) + nag_guidance = torch.where(mask, nag_guidance * adjustment, nag_guidance) + + x = nag_guidance * self.nag_alpha + x_positive * (1 - self.nag_alpha) + del nag_guidance + + return x + +#region NAG +def wan_crossattn_forward_nag(self, x, context, **kwargs): + r""" + Args: + x(Tensor): Shape [B, L1, C] + context(Tensor): Shape [B, L2, C] + """ + # Determine batch splitting and context handling + if self.input_type == "default": + # Single or [pos, neg] pair + if context.shape[0] == 1: + 
x_pos, context_pos = x, context + x_neg, context_neg = None, None + else: + x_pos, x_neg = torch.chunk(x, 2, dim=0) + context_pos, context_neg = torch.chunk(context, 2, dim=0) + elif self.input_type == "batch": + # Standard batch, no CFG + x_pos, context_pos = x, context + x_neg, context_neg = None, None + + # Positive branch + q_pos = self.norm_q(self.q(x_pos)) + nag_context = self.nag_context + if self.input_type == "batch": + nag_context = nag_context.repeat(x_pos.shape[0], 1, 1) + x_pos_out = normalized_attention_guidance(self, q_pos, context_pos, nag_context) + + # Negative branch + if x_neg is not None and context_neg is not None: + q_neg = self.norm_q(self.q(x_neg)) + k_neg = self.norm_k(self.k(context_neg)) + v_neg = self.v(context_neg) + x_neg_out = comfy.ldm.modules.attention.optimized_attention(q_neg, k_neg, v_neg, heads=self.num_heads) + x = torch.cat([x_pos_out, x_neg_out], dim=0) + else: + x = x_pos_out + + return self.o(x) + + +def wan_i2v_crossattn_forward_nag(self, x, context, context_img_len): + r""" + Args: + x(Tensor): Shape [B, L1, C] + context(Tensor): Shape [B, L2, C] + """ + context_img = context[:, :context_img_len] + context = context[:, context_img_len:] + + q_img = self.norm_q(self.q(x)) + k_img = self.norm_k_img(self.k_img(context_img)) + v_img = self.v_img(context_img) + img_x = comfy.ldm.modules.attention.optimized_attention(q_img, k_img, v_img, heads=self.num_heads) + + if context.shape[0] == 2: + x, x_real_negative = torch.chunk(x, 2, dim=0) + context_positive, context_negative = torch.chunk(context, 2, dim=0) + else: + context_positive = context + context_negative = None + + q = self.norm_q(self.q(x)) + + x = normalized_attention_guidance(self, q, context_positive, self.nag_context) + + if context_negative is not None: + q_real_negative = self.norm_q(self.q(x_real_negative)) + k_real_negative = self.norm_k(self.k(context_negative)) + v_real_negative = self.v(context_negative) + x_real_negative = comfy.ldm.modules.attention.optimized_attention(q_real_negative, k_real_negative, v_real_negative, heads=self.num_heads) + x = torch.cat([x, x_real_negative], dim=0) + + # output + x = x + img_x + x = self.o(x) + return x + +class WanCrossAttentionPatch: + def __init__(self, context, nag_scale, nag_alpha, nag_tau, i2v=False, input_type="default"): + self.nag_context = context + self.nag_scale = nag_scale + self.nag_alpha = nag_alpha + self.nag_tau = nag_tau + self.i2v = i2v + self.input_type = input_type + def __get__(self, obj, objtype=None): + # Create bound method with stored parameters + def wrapped_attention(self_module, *args, **kwargs): + self_module.nag_context = self.nag_context + self_module.nag_scale = self.nag_scale + self_module.nag_alpha = self.nag_alpha + self_module.nag_tau = self.nag_tau + self_module.input_type = self.input_type + if self.i2v: + return wan_i2v_crossattn_forward_nag(self_module, *args, **kwargs) + else: + return wan_crossattn_forward_nag(self_module, *args, **kwargs) + return types.MethodType(wrapped_attention, obj) + +class WanVideoNAG: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "conditioning": ("CONDITIONING",), + "nag_scale": ("FLOAT", {"default": 11.0, "min": 0.0, "max": 100.0, "step": 0.001, "tooltip": "Strength of negative guidance effect"}), + "nag_alpha": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.001, "tooltip": "Mixing coefficient in that controls the balance between the normalized guided representation and the original positive representation."}), + 
"nag_tau": ("FLOAT", {"default": 2.5, "min": 0.0, "max": 10.0, "step": 0.001, "tooltip": "Clipping threshold that controls how much the guided attention can deviate from the positive attention."}), + }, + "optional": { + "input_type": (["default", "batch"], {"tooltip": "Type of the model input"}), + }, + + } + + RETURN_TYPES = ("MODEL",) + RETURN_NAMES = ("model",) + FUNCTION = "patch" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "https://github.com/ChenDarYen/Normalized-Attention-Guidance" + EXPERIMENTAL = True + + def patch(self, model, conditioning, nag_scale, nag_alpha, nag_tau, input_type="default"): + if nag_scale == 0: + return (model,) + + device = mm.get_torch_device() + dtype = mm.unet_dtype() + + model_clone = model.clone() + + diffusion_model = model_clone.get_model_object("diffusion_model") + + diffusion_model.text_embedding.to(device) + context = diffusion_model.text_embedding(conditioning[0][0].to(device, dtype)) + + type_str = str(type(model.model.model_config).__name__) + i2v = True if "WAN21_I2V" in type_str else False + + for idx, block in enumerate(diffusion_model.blocks): + patched_attn = WanCrossAttentionPatch(context, nag_scale, nag_alpha, nag_tau, i2v, input_type=input_type).__get__(block.cross_attn, block.__class__) + + model_clone.add_object_patch(f"diffusion_model.blocks.{idx}.cross_attn.forward", patched_attn) + + return (model_clone,) + +class SkipLayerGuidanceWanVideo: + @classmethod + def INPUT_TYPES(s): + return {"required": {"model": ("MODEL", ), + "blocks": ("STRING", {"default": "10", "multiline": False}), + "start_percent": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "slg" + EXPERIMENTAL = True + DESCRIPTION = "Simplified skip layer guidance that only skips the uncond on selected blocks" + + CATEGORY = "advanced/guidance" + + def slg(self, model, start_percent, end_percent, blocks): + def skip(args, extra_args): + transformer_options = extra_args.get("transformer_options", {}) + original_block = extra_args["original_block"] + + if not transformer_options: + raise ValueError("transformer_options not found in extra_args, currently SkipLayerGuidanceWanVideo only works with TeaCacheKJ") + if start_percent <= transformer_options["current_percent"] <= end_percent: + if args["img"].shape[0] == 2: + prev_img_uncond = args["img"][0].unsqueeze(0) + + new_args = { + "img": args["img"][1].unsqueeze(0), + "txt": args["txt"][1].unsqueeze(0), + "vec": args["vec"][1].unsqueeze(0), + "pe": args["pe"][1].unsqueeze(0) + } + + block_out = original_block(new_args) + + out = { + "img": torch.cat([prev_img_uncond, block_out["img"]], dim=0), + "txt": args["txt"], + "vec": args["vec"], + "pe": args["pe"] + } + else: + if transformer_options.get("cond_or_uncond") == [0]: + out = original_block(args) + else: + out = args + else: + out = original_block(args) + return out + + block_list = [int(x.strip()) for x in blocks.split(",")] + blocks = [int(i) for i in block_list] + logging.info(f"Selected blocks to skip uncond on: {blocks}") + + m = model.clone() + + for b in blocks: + #m.set_model_patch_replace(skip, "dit", "double_block", b) + model_options = m.model_options["transformer_options"].copy() + if "patches_replace" not in model_options: + model_options["patches_replace"] = {} + else: + model_options["patches_replace"] = model_options["patches_replace"].copy() + + if "dit" not in model_options["patches_replace"]: + 
model_options["patches_replace"]["dit"] = {} + else: + model_options["patches_replace"]["dit"] = model_options["patches_replace"]["dit"].copy() + + block = ("double_block", b) + + model_options["patches_replace"]["dit"][block] = skip + m.model_options["transformer_options"] = model_options + + + return (m, ) + +class CFGZeroStarAndInit: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "use_zero_init": ("BOOLEAN", {"default": True}), + "zero_init_steps": ("INT", {"default": 0, "min": 0, "tooltip": "for zero init, starts from 0 so first step is always zeroed out if use_zero_init enabled"}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + DESCRIPTION = "https://github.com/WeichenFan/CFG-Zero-star" + CATEGORY = "KJNodes/experimental" + EXPERIMENTAL = True + + def patch(self, model, use_zero_init, zero_init_steps): + def cfg_zerostar(args): + #zero init + cond = args["cond"] + timestep = args["timestep"] + sigmas = args["model_options"]["transformer_options"]["sample_sigmas"] + matched_step_index = (sigmas == timestep[0]).nonzero() + if len(matched_step_index) > 0: + current_step_index = matched_step_index.item() + else: + for i in range(len(sigmas) - 1): + if (sigmas[i] - timestep[0]) * (sigmas[i + 1] - timestep[0]) <= 0: + current_step_index = i + break + else: + current_step_index = 0 + + if (current_step_index <= zero_init_steps) and use_zero_init: + return cond * 0 + + uncond = args["uncond"] + cond_scale = args["cond_scale"] + + batch_size = cond.shape[0] + + positive_flat = cond.view(batch_size, -1) + negative_flat = uncond.view(batch_size, -1) + + dot_product = torch.sum(positive_flat * negative_flat, dim=1, keepdim=True) + squared_norm = torch.sum(negative_flat ** 2, dim=1, keepdim=True) + 1e-8 + alpha = dot_product / squared_norm + alpha = alpha.view(batch_size, *([1] * (len(cond.shape) - 1))) + + noise_pred = uncond * alpha + cond_scale * (cond - uncond * alpha) + return noise_pred + + m = model.clone() + m.set_model_sampler_cfg_function(cfg_zerostar) + return (m, ) \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/nodes/nodes.py b/custom_nodes/comfyui-kjnodes/nodes/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..afc793f36035c5308891ed9f91887ba5a6a1fba0 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/nodes/nodes.py @@ -0,0 +1,2625 @@ +import torch +import torch.nn as nn +import numpy as np +from PIL import Image +import json, re, os, io, time +import re +import importlib + +from comfy import model_management +import folder_paths +from nodes import MAX_RESOLUTION +from comfy.utils import common_upscale, ProgressBar, load_torch_file +from comfy.comfy_types.node_typing import IO + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +folder_paths.add_model_folder_path("kjnodes_fonts", os.path.join(script_directory, "fonts")) + +class BOOLConstant: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "value": ("BOOLEAN", {"default": True}), + }, + } + RETURN_TYPES = ("BOOLEAN",) + RETURN_NAMES = ("value",) + FUNCTION = "get_value" + CATEGORY = "KJNodes/constants" + + def get_value(self, value): + return (value,) + +class INTConstant: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "value": ("INT", {"default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff}), + }, + } + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("value",) + FUNCTION = "get_value" + CATEGORY = "KJNodes/constants" + + def get_value(self, value): + return (value,) + 
+class FloatConstant: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "value": ("FLOAT", {"default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.00001}), + }, + } + + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("value",) + FUNCTION = "get_value" + CATEGORY = "KJNodes/constants" + + def get_value(self, value): + return (round(value, 6),) + +class StringConstant: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string": ("STRING", {"default": '', "multiline": False}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "passtring" + CATEGORY = "KJNodes/constants" + + def passtring(self, string): + return (string, ) + +class StringConstantMultiline: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string": ("STRING", {"default": "", "multiline": True}), + "strip_newlines": ("BOOLEAN", {"default": True}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "stringify" + CATEGORY = "KJNodes/constants" + + def stringify(self, string, strip_newlines): + new_string = [] + for line in io.StringIO(string): + if not line.strip().startswith("\n") and strip_newlines: + line = line.replace("\n", '') + new_string.append(line) + new_string = "\n".join(new_string) + + return (new_string, ) + + + +class ScaleBatchPromptSchedule: + + RETURN_TYPES = ("STRING",) + FUNCTION = "scaleschedule" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ +Scales a batch schedule from Fizz' nodes BatchPromptSchedule +to a different frame count. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_str": ("STRING", {"forceInput": True,"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n"}), + "old_frame_count": ("INT", {"forceInput": True,"default": 1,"min": 1, "max": 4096, "step": 1}), + "new_frame_count": ("INT", {"forceInput": True,"default": 1,"min": 1, "max": 4096, "step": 1}), + + }, + } + + def scaleschedule(self, old_frame_count, input_str, new_frame_count): + pattern = r'"(\d+)"\s*:\s*"(.*?)"(?:,|\Z)' + frame_strings = dict(re.findall(pattern, input_str)) + + # Calculate the scaling factor + scaling_factor = (new_frame_count - 1) / (old_frame_count - 1) + + # Initialize a dictionary to store the new frame numbers and strings + new_frame_strings = {} + + # Iterate over the frame numbers and strings + for old_frame, string in frame_strings.items(): + # Calculate the new frame number + new_frame = int(round(int(old_frame) * scaling_factor)) + + # Store the new frame number and corresponding string + new_frame_strings[new_frame] = string + + # Format the output string + output_str = ', '.join([f'"{k}":"{v}"' for k, v in sorted(new_frame_strings.items())]) + return (output_str,) + + +class GetLatentsFromBatchIndexed: + + RETURN_TYPES = ("LATENT",) + FUNCTION = "indexedlatentsfrombatch" + CATEGORY = "KJNodes/latents" + DESCRIPTION = """ +Selects and returns the latents at the specified indices as an latent batch. 
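+For example, indexes "0, 2, 4" selects batch items 0, 2 and 4 when latent_format is
+BCHW, or the corresponding entries along the time dimension for BTCHW and BCTHW.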
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "latents": ("LATENT",), + "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), + "latent_format": (["BCHW", "BTCHW", "BCTHW"], {"default": "BCHW"}), + }, + } + + def indexedlatentsfrombatch(self, latents, indexes, latent_format): + + samples = latents.copy() + latent_samples = samples["samples"] + + # Parse the indexes string into a list of integers + index_list = [int(index.strip()) for index in indexes.split(',')] + + # Convert list of indices to a PyTorch tensor + indices_tensor = torch.tensor(index_list, dtype=torch.long) + + # Select the latents at the specified indices + if latent_format == "BCHW": + chosen_latents = latent_samples[indices_tensor] + elif latent_format == "BTCHW": + chosen_latents = latent_samples[:, indices_tensor] + elif latent_format == "BCTHW": + chosen_latents = latent_samples[:, :, indices_tensor] + + samples["samples"] = chosen_latents + return (samples,) + + +class ConditioningMultiCombine: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 20, "step": 1}), + "operation": (["combine", "concat"], {"default": "combine"}), + "conditioning_1": ("CONDITIONING", ), + "conditioning_2": ("CONDITIONING", ), + }, + } + + RETURN_TYPES = ("CONDITIONING", "INT") + RETURN_NAMES = ("combined", "inputcount") + FUNCTION = "combine" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Combines multiple conditioning nodes into one +""" + + def combine(self, inputcount, operation, **kwargs): + from nodes import ConditioningCombine + from nodes import ConditioningConcat + cond_combine_node = ConditioningCombine() + cond_concat_node = ConditioningConcat() + cond = kwargs["conditioning_1"] + for c in range(1, inputcount): + new_cond = kwargs[f"conditioning_{c + 1}"] + if operation == "combine": + cond = cond_combine_node.combine(new_cond, cond)[0] + elif operation == "concat": + cond = cond_concat_node.concat(cond, new_cond)[0] + return (cond, inputcount,) + +class AppendStringsToList: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string1": ("STRING", {"default": '', "forceInput": True}), + "string2": ("STRING", {"default": '', "forceInput": True}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "joinstring" + CATEGORY = "KJNodes/text" + + def joinstring(self, string1, string2): + if not isinstance(string1, list): + string1 = [string1] + if not isinstance(string2, list): + string2 = [string2] + + joined_string = string1 + string2 + return (joined_string, ) + +class JoinStrings: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "delimiter": ("STRING", {"default": ' ', "multiline": False}), + }, + "optional": { + "string1": ("STRING", {"default": '', "forceInput": True}), + "string2": ("STRING", {"default": '', "forceInput": True}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "joinstring" + CATEGORY = "KJNodes/text" + + def joinstring(self, delimiter, string1="", string2=""): + joined_string = string1 + delimiter + string2 + return (joined_string, ) + +class JoinStringMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "string_1": ("STRING", {"default": '', "forceInput": True}), + "delimiter": ("STRING", {"default": ' ', "multiline": False}), + "return_list": ("BOOLEAN", {"default": False}), + }, + "optional": { + "string_2": ("STRING", {"default": '', "forceInput": True}), + } 
+ } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("string",) + FUNCTION = "combine" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Creates single string, or a list of strings, from +multiple input strings. +You can set how many inputs the node has, +with the **inputcount** and clicking update. +""" + + def combine(self, inputcount, delimiter, **kwargs): + string = kwargs["string_1"] + return_list = kwargs["return_list"] + strings = [string] # Initialize a list with the first string + for c in range(1, inputcount): + new_string = kwargs.get(f"string_{c + 1}", "") + if not new_string: + continue + if return_list: + strings.append(new_string) # Add new string to the list + else: + string = string + delimiter + new_string + if return_list: + return (strings,) # Return the list of strings + else: + return (string,) # Return the combined string + +class CondPassThrough: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + }, + "optional": { + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + }, + } + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING",) + RETURN_NAMES = ("positive", "negative") + FUNCTION = "passthrough" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ + Simply passes through the positive and negative conditioning, + workaround for Set node not allowing bypassed inputs. +""" + + def passthrough(self, positive=None, negative=None): + return (positive, negative,) + +class ModelPassThrough: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + }, + "optional": { + "model": ("MODEL", ), + }, + } + + RETURN_TYPES = ("MODEL", ) + RETURN_NAMES = ("model",) + FUNCTION = "passthrough" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ + Simply passes through the model, + workaround for Set node not allowing bypassed inputs. 
+""" + + def passthrough(self, model=None): + return (model,) + +def append_helper(t, mask, c, set_area_to_bounds, strength): + n = [t[0], t[1].copy()] + _, h, w = mask.shape + n[1]['mask'] = mask + n[1]['set_area_to_bounds'] = set_area_to_bounds + n[1]['mask_strength'] = strength + c.append(n) + +class ConditioningSetMaskAndCombine: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, negative_2, mask_1, mask_2, set_cond_area, mask_1_strength, mask_2_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) + return (c, c2) + +class ConditioningSetMaskAndCombine3: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "positive_3": ("CONDITIONING", ), + "negative_3": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_3": ("MASK", ), + "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, positive_3, negative_2, negative_3, mask_1, mask_2, mask_3, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + if len(mask_3.shape) < 3: + mask_3 = mask_3.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, 
set_area_to_bounds, mask_2_strength) + for t in positive_3: + append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) + for t in negative_3: + append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) + return (c, c2) + +class ConditioningSetMaskAndCombine4: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "positive_3": ("CONDITIONING", ), + "negative_3": ("CONDITIONING", ), + "positive_4": ("CONDITIONING", ), + "negative_4": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_3": ("MASK", ), + "mask_4": ("MASK", ), + "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_4_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, positive_3, positive_4, negative_2, negative_3, negative_4, mask_1, mask_2, mask_3, mask_4, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength, mask_4_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + if len(mask_3.shape) < 3: + mask_3 = mask_3.unsqueeze(0) + if len(mask_4.shape) < 3: + mask_4 = mask_4.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) + for t in positive_3: + append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) + for t in positive_4: + append_helper(t, mask_4, c, set_area_to_bounds, mask_4_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) + for t in negative_3: + append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) + for t in negative_4: + append_helper(t, mask_4, c2, set_area_to_bounds, mask_4_strength) + return (c, c2) + +class ConditioningSetMaskAndCombine5: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "positive_3": ("CONDITIONING", ), + "negative_3": ("CONDITIONING", ), + "positive_4": ("CONDITIONING", ), + "negative_4": ("CONDITIONING", ), + "positive_5": ("CONDITIONING", ), + "negative_5": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_3": ("MASK", ), + "mask_4": ("MASK", ), + "mask_5": ("MASK", ), + 
"mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_4_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_5_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, positive_3, positive_4, positive_5, negative_2, negative_3, negative_4, negative_5, mask_1, mask_2, mask_3, mask_4, mask_5, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength, mask_4_strength, mask_5_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + if len(mask_3.shape) < 3: + mask_3 = mask_3.unsqueeze(0) + if len(mask_4.shape) < 3: + mask_4 = mask_4.unsqueeze(0) + if len(mask_5.shape) < 3: + mask_5 = mask_5.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) + for t in positive_3: + append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) + for t in positive_4: + append_helper(t, mask_4, c, set_area_to_bounds, mask_4_strength) + for t in positive_5: + append_helper(t, mask_5, c, set_area_to_bounds, mask_5_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) + for t in negative_3: + append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) + for t in negative_4: + append_helper(t, mask_4, c2, set_area_to_bounds, mask_4_strength) + for t in negative_5: + append_helper(t, mask_5, c2, set_area_to_bounds, mask_5_strength) + return (c, c2) + +class VRAM_Debug: + + @classmethod + + def INPUT_TYPES(s): + return { + "required": { + + "empty_cache": ("BOOLEAN", {"default": True}), + "gc_collect": ("BOOLEAN", {"default": True}), + "unload_all_models": ("BOOLEAN", {"default": False}), + }, + "optional": { + "any_input": (IO.ANY,), + "image_pass": ("IMAGE",), + "model_pass": ("MODEL",), + } + } + + RETURN_TYPES = (IO.ANY, "IMAGE","MODEL","INT", "INT",) + RETURN_NAMES = ("any_output", "image_pass", "model_pass", "freemem_before", "freemem_after") + FUNCTION = "VRAMdebug" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ +Returns the inputs unchanged, they are only used as triggers, +and performs comfy model management functions and garbage collection, +reports free VRAM before and after the operations. 
+""" + + def VRAMdebug(self, gc_collect, empty_cache, unload_all_models, image_pass=None, model_pass=None, any_input=None): + freemem_before = model_management.get_free_memory() + print("VRAMdebug: free memory before: ", f"{freemem_before:,.0f}") + if empty_cache: + model_management.soft_empty_cache() + if unload_all_models: + model_management.unload_all_models() + if gc_collect: + import gc + gc.collect() + freemem_after = model_management.get_free_memory() + print("VRAMdebug: free memory after: ", f"{freemem_after:,.0f}") + print("VRAMdebug: freed memory: ", f"{freemem_after - freemem_before:,.0f}") + return {"ui": { + "text": [f"{freemem_before:,.0f}x{freemem_after:,.0f}"]}, + "result": (any_input, image_pass, model_pass, freemem_before, freemem_after) + } + +class SomethingToString: + @classmethod + + def INPUT_TYPES(s): + return { + "required": { + "input": (IO.ANY, ), + }, + "optional": { + "prefix": ("STRING", {"default": ""}), + "suffix": ("STRING", {"default": ""}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "stringify" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Converts any type to a string. +""" + + def stringify(self, input, prefix="", suffix=""): + if isinstance(input, (int, float, bool)): + stringified = str(input) + elif isinstance(input, list): + stringified = ', '.join(str(item) for item in input) + else: + return + if prefix: # Check if prefix is not empty + stringified = prefix + stringified # Add the prefix + if suffix: # Check if suffix is not empty + stringified = stringified + suffix # Add the suffix + + return (stringified,) + +class Sleep: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input": (IO.ANY, ), + "minutes": ("INT", {"default": 0, "min": 0, "max": 1439}), + "seconds": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 59.99, "step": 0.01}), + }, + } + RETURN_TYPES = (IO.ANY,) + FUNCTION = "sleepdelay" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ +Delays the execution for the input amount of time. 
+""" + + def sleepdelay(self, input, minutes, seconds): + total_seconds = minutes * 60 + seconds + time.sleep(total_seconds) + return input, + +class EmptyLatentImagePresets: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "dimensions": ( + [ + '512 x 512 (1:1)', + '768 x 512 (1.5:1)', + '960 x 512 (1.875:1)', + '1024 x 512 (2:1)', + '1024 x 576 (1.778:1)', + '1536 x 640 (2.4:1)', + '1344 x 768 (1.75:1)', + '1216 x 832 (1.46:1)', + '1152 x 896 (1.286:1)', + '1024 x 1024 (1:1)', + ], + { + "default": '512 x 512 (1:1)' + }), + + "invert": ("BOOLEAN", {"default": False}), + "batch_size": ("INT", { + "default": 1, + "min": 1, + "max": 4096 + }), + }, + } + + RETURN_TYPES = ("LATENT", "INT", "INT") + RETURN_NAMES = ("Latent", "Width", "Height") + FUNCTION = "generate" + CATEGORY = "KJNodes/latents" + + def generate(self, dimensions, invert, batch_size): + from nodes import EmptyLatentImage + result = [x.strip() for x in dimensions.split('x')] + + # Remove the aspect ratio part + result[0] = result[0].split('(')[0].strip() + result[1] = result[1].split('(')[0].strip() + + if invert: + width = int(result[1].split(' ')[0]) + height = int(result[0]) + else: + width = int(result[0]) + height = int(result[1].split(' ')[0]) + latent = EmptyLatentImage().generate(width, height, batch_size)[0] + + return (latent, int(width), int(height),) + +class EmptyLatentImageCustomPresets: + @classmethod + def INPUT_TYPES(cls): + try: + with open(os.path.join(script_directory, 'custom_dimensions.json')) as f: + dimensions_dict = json.load(f) + except FileNotFoundError: + dimensions_dict = [] + return { + "required": { + "dimensions": ( + [f"{d['label']} - {d['value']}" for d in dimensions_dict], + ), + + "invert": ("BOOLEAN", {"default": False}), + "batch_size": ("INT", { + "default": 1, + "min": 1, + "max": 4096 + }), + }, + } + + RETURN_TYPES = ("LATENT", "INT", "INT") + RETURN_NAMES = ("Latent", "Width", "Height") + FUNCTION = "generate" + CATEGORY = "KJNodes/latents" + DESCRIPTION = """ +Generates an empty latent image with the specified dimensions. +The choices are loaded from 'custom_dimensions.json' in the nodes folder. 
+""" + + def generate(self, dimensions, invert, batch_size): + from nodes import EmptyLatentImage + # Split the string into label and value + label, value = dimensions.split(' - ') + # Split the value into width and height + width, height = [x.strip() for x in value.split('x')] + + if invert: + width, height = height, width + + latent = EmptyLatentImage().generate(int(width), int(height), batch_size)[0] + + return (latent, int(width), int(height),) + +class WidgetToString: + @classmethod + def IS_CHANGED(cls,*,id,node_title,any_input,**kwargs): + if any_input is not None and (id != 0 or node_title != ""): + return float("NaN") + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "id": ("INT", {"default": 0, "min": 0, "max": 100000, "step": 1}), + "widget_name": ("STRING", {"multiline": False}), + "return_all": ("BOOLEAN", {"default": False}), + }, + "optional": { + "any_input": (IO.ANY, ), + "node_title": ("STRING", {"multiline": False}), + "allowed_float_decimals": ("INT", {"default": 2, "min": 0, "max": 10, "tooltip": "Number of decimal places to display for float values"}), + + }, + "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", + "prompt": "PROMPT", + "unique_id": "UNIQUE_ID",}, + } + + RETURN_TYPES = ("STRING", ) + FUNCTION = "get_widget_value" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Selects a node and it's specified widget and outputs the value as a string. +If no node id or title is provided it will use the 'any_input' link and use that node. +To see node id's, enable node id display from Manager badge menu. +Alternatively you can search with the node title. Node titles ONLY exist if they +are manually edited! +The 'any_input' is required for making sure the node you want the value from exists in the workflow. +""" + + def get_widget_value(self, id, widget_name, extra_pnginfo, prompt, unique_id, return_all=False, any_input=None, node_title="", allowed_float_decimals=2): + workflow = extra_pnginfo["workflow"] + #print(json.dumps(workflow, indent=4)) + results = [] + node_id = None # Initialize node_id to handle cases where no match is found + link_id = None + link_to_node_map = {} + + for node in workflow["nodes"]: + if node_title: + if "title" in node: + if node["title"] == node_title: + node_id = node["id"] + break + else: + print("Node title not found.") + elif id != 0: + if node["id"] == id: + node_id = id + break + elif any_input is not None: + if node["type"] == "WidgetToString" and node["id"] == int(unique_id) and not link_id: + for node_input in node["inputs"]: + if node_input["name"] == "any_input": + link_id = node_input["link"] + + # Construct a map of links to node IDs for future reference + node_outputs = node.get("outputs", None) + if not node_outputs: + continue + for output in node_outputs: + node_links = output.get("links", None) + if not node_links: + continue + for link in node_links: + link_to_node_map[link] = node["id"] + if link_id and link == link_id: + break + + if link_id: + node_id = link_to_node_map.get(link_id, None) + + if node_id is None: + raise ValueError("No matching node found for the given title or id") + + values = prompt[str(node_id)] + if "inputs" in values: + if return_all: + # Format items based on type + formatted_items = [] + for k, v in values["inputs"].items(): + if isinstance(v, float): + item = f"{k}: {v:.{allowed_float_decimals}f}" + else: + item = f"{k}: {str(v)}" + formatted_items.append(item) + results.append(', '.join(formatted_items)) + elif widget_name in values["inputs"]: + v = values["inputs"][widget_name] 
+ if isinstance(v, float): + v = f"{v:.{allowed_float_decimals}f}" + else: + v = str(v) + return (v, ) + else: + raise NameError(f"Widget not found: {node_id}.{widget_name}") + return (', '.join(results).strip(', '), ) + +class DummyOut: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "any_input": (IO.ANY, ), + } + } + + RETURN_TYPES = (IO.ANY,) + FUNCTION = "dummy" + CATEGORY = "KJNodes/misc" + OUTPUT_NODE = True + DESCRIPTION = """ +Does nothing, used to trigger generic workflow output. +A way to get previews in the UI without saving anything to disk. +""" + + def dummy(self, any_input): + return (any_input,) + +class FlipSigmasAdjusted: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"sigmas": ("SIGMAS", ), + "divide_by_last_sigma": ("BOOLEAN", {"default": False}), + "divide_by": ("FLOAT", {"default": 1,"min": 1, "max": 255, "step": 0.01}), + "offset_by": ("INT", {"default": 1,"min": -100, "max": 100, "step": 1}), + } + } + RETURN_TYPES = ("SIGMAS", "STRING",) + RETURN_NAMES = ("SIGMAS", "sigmas_string",) + CATEGORY = "KJNodes/noise" + FUNCTION = "get_sigmas_adjusted" + + def get_sigmas_adjusted(self, sigmas, divide_by_last_sigma, divide_by, offset_by): + + sigmas = sigmas.flip(0) + if sigmas[0] == 0: + sigmas[0] = 0.0001 + adjusted_sigmas = sigmas.clone() + #offset sigma + for i in range(1, len(sigmas)): + offset_index = i - offset_by + if 0 <= offset_index < len(sigmas): + adjusted_sigmas[i] = sigmas[offset_index] + else: + adjusted_sigmas[i] = 0.0001 + if adjusted_sigmas[0] == 0: + adjusted_sigmas[0] = 0.0001 + if divide_by_last_sigma: + adjusted_sigmas = adjusted_sigmas / adjusted_sigmas[-1] + + sigma_np_array = adjusted_sigmas.numpy() + array_string = np.array2string(sigma_np_array, precision=2, separator=', ', threshold=np.inf) + adjusted_sigmas = adjusted_sigmas / divide_by + return (adjusted_sigmas, array_string,) + +class CustomSigmas: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "sigmas_string" :("STRING", {"default": "14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029","multiline": True}), + "interpolate_to_steps": ("INT", {"default": 10,"min": 0, "max": 255, "step": 1}), + } + } + RETURN_TYPES = ("SIGMAS",) + RETURN_NAMES = ("SIGMAS",) + CATEGORY = "KJNodes/noise" + FUNCTION = "customsigmas" + DESCRIPTION = """ +Creates a sigmas tensor from a string of comma separated values. +Examples: + +Nvidia's optimized AYS 10 step schedule for SD 1.5: +14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029 +SDXL: +14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029 +SVD: +700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002 +""" + def customsigmas(self, sigmas_string, interpolate_to_steps): + sigmas_list = sigmas_string.split(', ') + sigmas_float_list = [float(sigma) for sigma in sigmas_list] + sigmas_tensor = torch.FloatTensor(sigmas_float_list) + if len(sigmas_tensor) != interpolate_to_steps + 1: + sigmas_tensor = self.loglinear_interp(sigmas_tensor, interpolate_to_steps + 1) + sigmas_tensor[-1] = 0 + return (sigmas_tensor.float(),) + + def loglinear_interp(self, t_steps, num_steps): + """ + Performs log-linear interpolation of a given array of decreasing numbers. 
+ """ + t_steps_np = t_steps.numpy() + + xs = np.linspace(0, 1, len(t_steps_np)) + ys = np.log(t_steps_np[::-1]) + + new_xs = np.linspace(0, 1, num_steps) + new_ys = np.interp(new_xs, xs, ys) + + interped_ys = np.exp(new_ys)[::-1].copy() + interped_ys_tensor = torch.tensor(interped_ys) + return interped_ys_tensor + +class StringToFloatList: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "string" :("STRING", {"default": "1, 2, 3", "multiline": True}), + } + } + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("FLOAT",) + CATEGORY = "KJNodes/misc" + FUNCTION = "createlist" + + def createlist(self, string): + float_list = [float(x.strip()) for x in string.split(',')] + return (float_list,) + + +class InjectNoiseToLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "latents":("LATENT",), + "strength": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 200.0, "step": 0.0001}), + "noise": ("LATENT",), + "normalize": ("BOOLEAN", {"default": False}), + "average": ("BOOLEAN", {"default": False}), + }, + "optional":{ + "mask": ("MASK", ), + "mix_randn_amount": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.001}), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "injectnoise" + CATEGORY = "KJNodes/noise" + + def injectnoise(self, latents, strength, noise, normalize, average, mix_randn_amount=0, seed=None, mask=None): + samples = latents["samples"].clone().cpu() + noise = noise["samples"].clone().cpu() + if samples.shape != samples.shape: + raise ValueError("InjectNoiseToLatent: Latent and noise must have the same shape") + if average: + noised = (samples + noise) / 2 + else: + noised = samples + noise * strength + if normalize: + noised = noised / noised.std() + if mask is not None: + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(noised.shape[2], noised.shape[3]), mode="bilinear") + mask = mask.expand((-1,noised.shape[1],-1,-1)) + if mask.shape[0] < noised.shape[0]: + mask = mask.repeat((noised.shape[0] -1) // mask.shape[0] + 1, 1, 1, 1)[:noised.shape[0]] + noised = mask * noised + (1-mask) * samples + if mix_randn_amount > 0: + if seed is not None: + generator = torch.manual_seed(seed) + rand_noise = torch.randn(noised.size(), dtype=noised.dtype, layout=noised.layout, generator=generator, device="cpu") + noised = noised + (mix_randn_amount * rand_noise) + + return ({"samples":noised},) + +class SoundReactive: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sound_level": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 99999, "step": 0.01}), + "start_range_hz": ("INT", {"default": 150, "min": 0, "max": 9999, "step": 1}), + "end_range_hz": ("INT", {"default": 2000, "min": 0, "max": 9999, "step": 1}), + "multiplier": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 99999, "step": 0.01}), + "smoothing_factor": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "normalize": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("FLOAT","INT",) + RETURN_NAMES =("sound_level", "sound_level_int",) + FUNCTION = "react" + CATEGORY = "KJNodes/audio" + DESCRIPTION = """ +Reacts to the sound level of the input. +Uses your browsers sound input options and requires. +Meant to be used with realtime diffusion with autoqueue. 
+""" + + def react(self, sound_level, start_range_hz, end_range_hz, smoothing_factor, multiplier, normalize): + + sound_level *= multiplier + + if normalize: + sound_level /= 255 + + sound_level_int = int(sound_level) + return (sound_level, sound_level_int, ) + +class GenerateNoise: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + "multiplier": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 4096, "step": 0.01}), + "constant_batch_noise": ("BOOLEAN", {"default": False}), + "normalize": ("BOOLEAN", {"default": False}), + }, + "optional": { + "model": ("MODEL", ), + "sigmas": ("SIGMAS", ), + "latent_channels": (['4', '16', ],), + "shape": (["BCHW", "BCTHW","BTCHW",],), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "generatenoise" + CATEGORY = "KJNodes/noise" + DESCRIPTION = """ +Generates noise for injection or to be used as empty latents on samplers with add_noise off. +""" + + def generatenoise(self, batch_size, width, height, seed, multiplier, constant_batch_noise, normalize, sigmas=None, model=None, latent_channels=4, shape="BCHW"): + + generator = torch.manual_seed(seed) + if shape == "BCHW": + noise = torch.randn([batch_size, int(latent_channels), height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu") + elif shape == "BCTHW": + noise = torch.randn([1, int(latent_channels), batch_size,height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu") + elif shape == "BTCHW": + noise = torch.randn([1, batch_size, int(latent_channels), height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu") + if sigmas is not None: + sigma = sigmas[0] - sigmas[-1] + sigma /= model.model.latent_format.scale_factor + noise *= sigma + + noise *=multiplier + + if normalize: + noise = noise / noise.std() + if constant_batch_noise: + noise = noise[0].repeat(batch_size, 1, 1, 1) + + + return ({"samples":noise}, ) + +def camera_embeddings(elevation, azimuth): + elevation = torch.as_tensor([elevation]) + azimuth = torch.as_tensor([azimuth]) + embeddings = torch.stack( + [ + torch.deg2rad( + (90 - elevation) - (90) + ), # Zero123 polar is 90-elevation + torch.sin(torch.deg2rad(azimuth)), + torch.cos(torch.deg2rad(azimuth)), + torch.deg2rad( + 90 - torch.full_like(elevation, 0) + ), + ], dim=-1).unsqueeze(1) + + return embeddings + +def interpolate_angle(start, end, fraction): + # Calculate the difference in angles and adjust for wraparound if necessary + diff = (end - start + 540) % 360 - 180 + # Apply fraction to the difference + interpolated = start + fraction * diff + # Normalize the result to be within the range of -180 to 180 + return (interpolated + 180) % 360 - 180 + + +class StableZero123_BatchSchedule: + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip_vision": ("CLIP_VISION",), + "init_image": ("IMAGE",), + "vae": ("VAE",), + "width": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), + "azimuth_points_string": 
("STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": True}), + "elevation_points_string": ("STRING", {"default": "0:(0.0),\n7:(0.0),\n15:(0.0)\n", "multiline": True}), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + FUNCTION = "encode" + CATEGORY = "KJNodes/experimental" + + def encode(self, clip_vision, init_image, vae, width, height, batch_size, azimuth_points_string, elevation_points_string, interpolation): + output = clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + t = vae.encode(encode_pixels) + + def ease_in(t): + return t * t + def ease_out(t): + return 1 - (1 - t) * (1 - t) + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + # Parse the azimuth input string into a list of tuples + azimuth_points = [] + azimuth_points_string = azimuth_points_string.rstrip(',\n') + for point_str in azimuth_points_string.split(','): + frame_str, azimuth_str = point_str.split(':') + frame = int(frame_str.strip()) + azimuth = float(azimuth_str.strip()[1:-1]) + azimuth_points.append((frame, azimuth)) + # Sort the points by frame number + azimuth_points.sort(key=lambda x: x[0]) + + # Parse the elevation input string into a list of tuples + elevation_points = [] + elevation_points_string = elevation_points_string.rstrip(',\n') + for point_str in elevation_points_string.split(','): + frame_str, elevation_str = point_str.split(':') + frame = int(frame_str.strip()) + elevation_val = float(elevation_str.strip()[1:-1]) + elevation_points.append((frame, elevation_val)) + # Sort the points by frame number + elevation_points.sort(key=lambda x: x[0]) + + # Index of the next point to interpolate towards + next_point = 1 + next_elevation_point = 1 + + positive_cond_out = [] + positive_pooled_out = [] + negative_cond_out = [] + negative_pooled_out = [] + + #azimuth interpolation + for i in range(batch_size): + # Find the interpolated azimuth for the current frame + while next_point < len(azimuth_points) and i >= azimuth_points[next_point][0]: + next_point += 1 + # If next_point is equal to the length of points, we've gone past the last point + if next_point == len(azimuth_points): + next_point -= 1 # Set next_point to the last index of points + prev_point = max(next_point - 1, 0) # Ensure prev_point is not less than 0 + + # Calculate fraction + if azimuth_points[next_point][0] != azimuth_points[prev_point][0]: # Prevent division by zero + fraction = (i - azimuth_points[prev_point][0]) / (azimuth_points[next_point][0] - azimuth_points[prev_point][0]) + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + # Use the new interpolate_angle function + interpolated_azimuth = interpolate_angle(azimuth_points[prev_point][1], azimuth_points[next_point][1], fraction) + else: + interpolated_azimuth = azimuth_points[prev_point][1] + # Interpolate the elevation + next_elevation_point = 1 + while next_elevation_point < len(elevation_points) and i >= elevation_points[next_elevation_point][0]: + next_elevation_point += 1 + if next_elevation_point == len(elevation_points): + next_elevation_point -= 1 + prev_elevation_point = max(next_elevation_point - 1, 0) + + if elevation_points[next_elevation_point][0] != 
elevation_points[prev_elevation_point][0]: + fraction = (i - elevation_points[prev_elevation_point][0]) / (elevation_points[next_elevation_point][0] - elevation_points[prev_elevation_point][0]) + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + interpolated_elevation = interpolate_angle(elevation_points[prev_elevation_point][1], elevation_points[next_elevation_point][1], fraction) + else: + interpolated_elevation = elevation_points[prev_elevation_point][1] + + cam_embeds = camera_embeddings(interpolated_elevation, interpolated_azimuth) + cond = torch.cat([pooled, cam_embeds.repeat((pooled.shape[0], 1, 1))], dim=-1) + + positive_pooled_out.append(t) + positive_cond_out.append(cond) + negative_pooled_out.append(torch.zeros_like(t)) + negative_cond_out.append(torch.zeros_like(pooled)) + + # Concatenate the conditions and pooled outputs + final_positive_cond = torch.cat(positive_cond_out, dim=0) + final_positive_pooled = torch.cat(positive_pooled_out, dim=0) + final_negative_cond = torch.cat(negative_cond_out, dim=0) + final_negative_pooled = torch.cat(negative_pooled_out, dim=0) + + # Structure the final output + final_positive = [[final_positive_cond, {"concat_latent_image": final_positive_pooled}]] + final_negative = [[final_negative_cond, {"concat_latent_image": final_negative_pooled}]] + + latent = torch.zeros([batch_size, 4, height // 8, width // 8]) + return (final_positive, final_negative, {"samples": latent}) + +def linear_interpolate(start, end, fraction): + return start + (end - start) * fraction + +class SV3D_BatchSchedule: + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip_vision": ("CLIP_VISION",), + "init_image": ("IMAGE",), + "vae": ("VAE",), + "width": ("INT", {"default": 576, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 576, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 21, "min": 1, "max": 4096}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), + "azimuth_points_string": ("STRING", {"default": "0:(0.0),\n9:(180.0),\n20:(360.0)\n", "multiline": True}), + "elevation_points_string": ("STRING", {"default": "0:(0.0),\n9:(0.0),\n20:(0.0)\n", "multiline": True}), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + FUNCTION = "encode" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Allow scheduling of the azimuth and elevation conditions for SV3D. 
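+The azimuth and elevation strings are parsed as comma separated frame:(value)
+pairs, for example "0:(0.0), 9:(180.0), 20:(360.0)" places keyframes at frames
+0, 9 and 20 and interpolates the frames in between.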
+Note that SV3D is still a video model and the schedule needs to always go forward +https://huggingface.co/stabilityai/sv3d +""" + + def encode(self, clip_vision, init_image, vae, width, height, batch_size, azimuth_points_string, elevation_points_string, interpolation): + output = clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + t = vae.encode(encode_pixels) + + def ease_in(t): + return t * t + def ease_out(t): + return 1 - (1 - t) * (1 - t) + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + # Parse the azimuth input string into a list of tuples + azimuth_points = [] + azimuth_points_string = azimuth_points_string.rstrip(',\n') + for point_str in azimuth_points_string.split(','): + frame_str, azimuth_str = point_str.split(':') + frame = int(frame_str.strip()) + azimuth = float(azimuth_str.strip()[1:-1]) + azimuth_points.append((frame, azimuth)) + # Sort the points by frame number + azimuth_points.sort(key=lambda x: x[0]) + + # Parse the elevation input string into a list of tuples + elevation_points = [] + elevation_points_string = elevation_points_string.rstrip(',\n') + for point_str in elevation_points_string.split(','): + frame_str, elevation_str = point_str.split(':') + frame = int(frame_str.strip()) + elevation_val = float(elevation_str.strip()[1:-1]) + elevation_points.append((frame, elevation_val)) + # Sort the points by frame number + elevation_points.sort(key=lambda x: x[0]) + + # Index of the next point to interpolate towards + next_point = 1 + next_elevation_point = 1 + elevations = [] + azimuths = [] + # For azimuth interpolation + for i in range(batch_size): + # Find the interpolated azimuth for the current frame + while next_point < len(azimuth_points) and i >= azimuth_points[next_point][0]: + next_point += 1 + if next_point == len(azimuth_points): + next_point -= 1 + prev_point = max(next_point - 1, 0) + + if azimuth_points[next_point][0] != azimuth_points[prev_point][0]: + fraction = (i - azimuth_points[prev_point][0]) / (azimuth_points[next_point][0] - azimuth_points[prev_point][0]) + # Apply the ease function to the fraction + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + interpolated_azimuth = linear_interpolate(azimuth_points[prev_point][1], azimuth_points[next_point][1], fraction) + else: + interpolated_azimuth = azimuth_points[prev_point][1] + + # Interpolate the elevation + next_elevation_point = 1 + while next_elevation_point < len(elevation_points) and i >= elevation_points[next_elevation_point][0]: + next_elevation_point += 1 + if next_elevation_point == len(elevation_points): + next_elevation_point -= 1 + prev_elevation_point = max(next_elevation_point - 1, 0) + + if elevation_points[next_elevation_point][0] != elevation_points[prev_elevation_point][0]: + fraction = (i - elevation_points[prev_elevation_point][0]) / (elevation_points[next_elevation_point][0] - elevation_points[prev_elevation_point][0]) + # Apply the ease function to the fraction + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + interpolated_elevation = 
linear_interpolate(elevation_points[prev_elevation_point][1], elevation_points[next_elevation_point][1], fraction) + else: + interpolated_elevation = elevation_points[prev_elevation_point][1] + + azimuths.append(interpolated_azimuth) + elevations.append(interpolated_elevation) + + #print("azimuths", azimuths) + #print("elevations", elevations) + + # Structure the final output + final_positive = [[pooled, {"concat_latent_image": t, "elevation": elevations, "azimuth": azimuths}]] + final_negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t),"elevation": elevations, "azimuth": azimuths}]] + + latent = torch.zeros([batch_size, 4, height // 8, width // 8]) + return (final_positive, final_negative, {"samples": latent}) + +class LoadResAdapterNormalization: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "resadapter_path": (folder_paths.get_filename_list("checkpoints"), ) + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "load_res_adapter" + CATEGORY = "KJNodes/experimental" + + def load_res_adapter(self, model, resadapter_path): + print("ResAdapter: Checking ResAdapter path") + resadapter_full_path = folder_paths.get_full_path("checkpoints", resadapter_path) + if not os.path.exists(resadapter_full_path): + raise Exception("Invalid model path") + else: + print("ResAdapter: Loading ResAdapter normalization weights") + from comfy.utils import load_torch_file + prefix_to_remove = 'diffusion_model.' + model_clone = model.clone() + norm_state_dict = load_torch_file(resadapter_full_path) + new_values = {key[len(prefix_to_remove):]: value for key, value in norm_state_dict.items() if key.startswith(prefix_to_remove)} + print("ResAdapter: Attempting to add patches with ResAdapter weights") + try: + for key in model.model.diffusion_model.state_dict().keys(): + if key in new_values: + original_tensor = model.model.diffusion_model.state_dict()[key] + new_tensor = new_values[key].to(model.model.diffusion_model.dtype) + if original_tensor.shape == new_tensor.shape: + model_clone.add_object_patch(f"diffusion_model.{key}.data", new_tensor) + else: + print("ResAdapter: No match for key: ",key) + except: + raise Exception("Could not patch model, this way of patching was added to ComfyUI on March 3rd 2024, is your ComfyUI up to date?") + print("ResAdapter: Added resnet normalization patches") + return (model_clone, ) + +class Superprompt: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "instruction_prompt": ("STRING", {"default": 'Expand the following prompt to add more detail', "multiline": True}), + "prompt": ("STRING", {"default": '', "multiline": True, "forceInput": True}), + "max_new_tokens": ("INT", {"default": 128, "min": 1, "max": 4096, "step": 1}), + } + } + + RETURN_TYPES = ("STRING",) + FUNCTION = "process" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +# SuperPrompt +A T5 model fine-tuned on the SuperPrompt dataset for +upsampling text prompts to more detailed descriptions. +Meant to be used as a pre-generation step for text-to-image +models that benefit from more detailed prompts. 
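+The text sent to the T5 model is simply the two inputs joined as
+"instruction_prompt: prompt".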
+https://huggingface.co/roborovski/superprompt-v1
+"""
+
+    def process(self, instruction_prompt, prompt, max_new_tokens):
+        device = model_management.get_torch_device()
+        from transformers import T5Tokenizer, T5ForConditionalGeneration
+
+        checkpoint_path = os.path.join(script_directory, "models", "superprompt-v1")
+        if not os.path.exists(checkpoint_path):
+            print(f"Downloading model to: {checkpoint_path}")
+            from huggingface_hub import snapshot_download
+            snapshot_download(repo_id="roborovski/superprompt-v1",
+                              local_dir=checkpoint_path,
+                              local_dir_use_symlinks=False)
+        tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small", legacy=False)
+
+        model = T5ForConditionalGeneration.from_pretrained(checkpoint_path, device_map=device)
+        model.to(device)
+        input_text = instruction_prompt + ": " + prompt
+
+        input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
+        outputs = model.generate(input_ids, max_new_tokens=max_new_tokens)
+        out = tokenizer.decode(outputs[0])
+        # strip the T5 special tokens left in by decode()
+        out = out.replace('<pad>', '')
+        out = out.replace('</s>', '')
+
+        return (out, )
+
+
+class CameraPoseVisualizer:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "pose_file_path": ("STRING", {"default": '', "multiline": False}),
+            "base_xval": ("FLOAT", {"default": 0.2, "min": 0, "max": 100, "step": 0.01}),
+            "zval": ("FLOAT", {"default": 0.3, "min": 0, "max": 100, "step": 0.01}),
+            "scale": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 10.0, "step": 0.01}),
+            "use_exact_fx": ("BOOLEAN", {"default": False}),
+            "relative_c2w": ("BOOLEAN", {"default": True}),
+            "use_viewer": ("BOOLEAN", {"default": False}),
+            },
+            "optional": {
+                "cameractrl_poses": ("CAMERACTRL_POSES", {"default": None}),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "plot"
+    CATEGORY = "KJNodes/misc"
+    DESCRIPTION = """
+Visualizes the camera poses, from Animatediff-Evolved CameraCtrl Pose
+or a .txt file with RealEstate camera intrinsics and coordinates, in a 3D plot.
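+As parsed here, the first line of the .txt file is skipped and each remaining
+line holds whitespace separated values with fx in the second column and the
+12 entries of the 3x4 world-to-camera matrix starting at the eighth column
+(RealEstate10K style).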
+""" + + def plot(self, pose_file_path, scale, base_xval, zval, use_exact_fx, relative_c2w, use_viewer, cameractrl_poses=None): + import matplotlib as mpl + import matplotlib.pyplot as plt + from torchvision.transforms import ToTensor + + x_min = -2.0 * scale + x_max = 2.0 * scale + y_min = -2.0 * scale + y_max = 2.0 * scale + z_min = -2.0 * scale + z_max = 2.0 * scale + plt.rcParams['text.color'] = '#999999' + self.fig = plt.figure(figsize=(18, 7)) + self.fig.patch.set_facecolor('#353535') + self.ax = self.fig.add_subplot(projection='3d') + self.ax.set_facecolor('#353535') # Set the background color here + self.ax.grid(color='#999999', linestyle='-', linewidth=0.5) + self.plotly_data = None # plotly data traces + self.ax.set_aspect("auto") + self.ax.set_xlim(x_min, x_max) + self.ax.set_ylim(y_min, y_max) + self.ax.set_zlim(z_min, z_max) + self.ax.set_xlabel('x', color='#999999') + self.ax.set_ylabel('y', color='#999999') + self.ax.set_zlabel('z', color='#999999') + for text in self.ax.get_xticklabels() + self.ax.get_yticklabels() + self.ax.get_zticklabels(): + text.set_color('#999999') + print('initialize camera pose visualizer') + + if pose_file_path != "": + with open(pose_file_path, 'r') as f: + poses = f.readlines() + w2cs = [np.asarray([float(p) for p in pose.strip().split(' ')[7:]]).reshape(3, 4) for pose in poses[1:]] + fxs = [float(pose.strip().split(' ')[1]) for pose in poses[1:]] + #print(poses) + elif cameractrl_poses is not None: + poses = cameractrl_poses + w2cs = [np.array(pose[7:]).reshape(3, 4) for pose in cameractrl_poses] + fxs = [pose[1] for pose in cameractrl_poses] + else: + raise ValueError("Please provide either pose_file_path or cameractrl_poses") + + total_frames = len(w2cs) + transform_matrix = np.asarray([[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]]).reshape(4, 4) + last_row = np.zeros((1, 4)) + last_row[0, -1] = 1.0 + + w2cs = [np.concatenate((w2c, last_row), axis=0) for w2c in w2cs] + c2ws = self.get_c2w(w2cs, transform_matrix, relative_c2w) + + for frame_idx, c2w in enumerate(c2ws): + self.extrinsic2pyramid(c2w, frame_idx / total_frames, hw_ratio=1/1, base_xval=base_xval, + zval=(fxs[frame_idx] if use_exact_fx else zval)) + + # Create the colorbar + cmap = mpl.cm.rainbow + norm = mpl.colors.Normalize(vmin=0, vmax=total_frames) + colorbar = self.fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), ax=self.ax, orientation='vertical') + + # Change the colorbar label + colorbar.set_label('Frame', color='#999999') # Change the label and its color + + # Change the tick colors + colorbar.ax.yaxis.set_tick_params(colors='#999999') # Change the tick color + + # Change the tick frequency + # Assuming you want to set the ticks at every 10th frame + ticks = np.arange(0, total_frames, 10) + colorbar.ax.yaxis.set_ticks(ticks) + + plt.title('') + plt.draw() + buf = io.BytesIO() + plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0) + buf.seek(0) + img = Image.open(buf) + tensor_img = ToTensor()(img) + buf.close() + tensor_img = tensor_img.permute(1, 2, 0).unsqueeze(0) + if use_viewer: + time.sleep(1) + plt.show() + return (tensor_img,) + + def extrinsic2pyramid(self, extrinsic, color_map='red', hw_ratio=1/1, base_xval=1, zval=3): + import matplotlib.pyplot as plt + from mpl_toolkits.mplot3d.art3d import Poly3DCollection + vertex_std = np.array([[0, 0, 0, 1], + [base_xval, -base_xval * hw_ratio, zval, 1], + [base_xval, base_xval * hw_ratio, zval, 1], + [-base_xval, base_xval * hw_ratio, zval, 1], + [-base_xval, -base_xval * hw_ratio, zval, 1]]) 
+ vertex_transformed = vertex_std @ extrinsic.T + meshes = [[vertex_transformed[0, :-1], vertex_transformed[1][:-1], vertex_transformed[2, :-1]], + [vertex_transformed[0, :-1], vertex_transformed[2, :-1], vertex_transformed[3, :-1]], + [vertex_transformed[0, :-1], vertex_transformed[3, :-1], vertex_transformed[4, :-1]], + [vertex_transformed[0, :-1], vertex_transformed[4, :-1], vertex_transformed[1, :-1]], + [vertex_transformed[1, :-1], vertex_transformed[2, :-1], vertex_transformed[3, :-1], vertex_transformed[4, :-1]]] + + color = color_map if isinstance(color_map, str) else plt.cm.rainbow(color_map) + + self.ax.add_collection3d( + Poly3DCollection(meshes, facecolors=color, linewidths=0.3, edgecolors=color, alpha=0.25)) + + def customize_legend(self, list_label): + from matplotlib.patches import Patch + import matplotlib.pyplot as plt + list_handle = [] + for idx, label in enumerate(list_label): + color = plt.cm.rainbow(idx / len(list_label)) + patch = Patch(color=color, label=label) + list_handle.append(patch) + plt.legend(loc='right', bbox_to_anchor=(1.8, 0.5), handles=list_handle) + + def get_c2w(self, w2cs, transform_matrix, relative_c2w): + if relative_c2w: + target_cam_c2w = np.array([ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1] + ]) + abs2rel = target_cam_c2w @ w2cs[0] + ret_poses = [target_cam_c2w, ] + [abs2rel @ np.linalg.inv(w2c) for w2c in w2cs[1:]] + else: + ret_poses = [np.linalg.inv(w2c) for w2c in w2cs] + ret_poses = [transform_matrix @ x for x in ret_poses] + return np.array(ret_poses, dtype=np.float32) + + + +class CheckpointPerturbWeights: + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "joint_blocks": ("FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}), + "final_layer": ("FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}), + "rest_of_the_blocks": ("FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + } + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "mod" + OUTPUT_NODE = True + + CATEGORY = "KJNodes/experimental" + + def mod(self, seed, model, joint_blocks, final_layer, rest_of_the_blocks): + import copy + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + device = model_management.get_torch_device() + model_copy = copy.deepcopy(model) + model_copy.model.to(device) + keys = model_copy.model.diffusion_model.state_dict().keys() + + dict = {} + for key in keys: + dict[key] = model_copy.model.diffusion_model.state_dict()[key] + + pbar = ProgressBar(len(keys)) + for k in keys: + v = dict[k] + print(f'{k}: {v.std()}') + if k.startswith('joint_blocks'): + multiplier = joint_blocks + elif k.startswith('final_layer'): + multiplier = final_layer + else: + multiplier = rest_of_the_blocks + dict[k] += torch.normal(torch.zeros_like(v) * v.mean(), torch.ones_like(v) * v.std() * multiplier).to(device) + pbar.update(1) + model_copy.model.diffusion_model.load_state_dict(dict) + return model_copy, + +class DifferentialDiffusionAdvanced(): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL", ), + "samples": ("LATENT",), + "mask": ("MASK",), + "multiplier": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.001}), + }} + RETURN_TYPES = ("MODEL", "LATENT") + FUNCTION = "apply" + CATEGORY = "_for_testing" + INIT = False + + def apply(self, model, samples, mask, multiplier): + self.multiplier = multiplier + model = model.clone() + 
model.set_model_denoise_mask_function(self.forward) + s = samples.copy() + s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])) + return (model, s) + + def forward(self, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict): + model = extra_options["model"] + step_sigmas = extra_options["sigmas"] + sigma_to = model.inner_model.model_sampling.sigma_min + if step_sigmas[-1] > sigma_to: + sigma_to = step_sigmas[-1] + sigma_from = step_sigmas[0] + + ts_from = model.inner_model.model_sampling.timestep(sigma_from) + ts_to = model.inner_model.model_sampling.timestep(sigma_to) + current_ts = model.inner_model.model_sampling.timestep(sigma[0]) + + threshold = (current_ts - ts_to) / (ts_from - ts_to) / self.multiplier + + return (denoise_mask >= threshold).to(denoise_mask.dtype) + +class FluxBlockLoraSelect: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + arg_dict = {} + argument = ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}) + + for i in range(19): + arg_dict["double_blocks.{}.".format(i)] = argument + + for i in range(38): + arg_dict["single_blocks.{}.".format(i)] = argument + + return {"required": arg_dict} + + RETURN_TYPES = ("SELECTEDDITBLOCKS", ) + RETURN_NAMES = ("blocks", ) + OUTPUT_TOOLTIPS = ("The modified diffusion model.",) + FUNCTION = "load_lora" + + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "Select individual block alpha values, value of 0 removes the block altogether" + + def load_lora(self, **kwargs): + return (kwargs,) + +class HunyuanVideoBlockLoraSelect: + @classmethod + def INPUT_TYPES(s): + arg_dict = {} + argument = ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}) + + for i in range(20): + arg_dict["double_blocks.{}.".format(i)] = argument + + for i in range(40): + arg_dict["single_blocks.{}.".format(i)] = argument + + return {"required": arg_dict} + + RETURN_TYPES = ("SELECTEDDITBLOCKS", ) + RETURN_NAMES = ("blocks", ) + OUTPUT_TOOLTIPS = ("The modified diffusion model.",) + FUNCTION = "load_lora" + + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "Select individual block alpha values, value of 0 removes the block altogether" + + def load_lora(self, **kwargs): + return (kwargs,) + +class Wan21BlockLoraSelect: + @classmethod + def INPUT_TYPES(s): + arg_dict = {} + argument = ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}) + + for i in range(40): + arg_dict["blocks.{}.".format(i)] = argument + + return {"required": arg_dict} + + RETURN_TYPES = ("SELECTEDDITBLOCKS", ) + RETURN_NAMES = ("blocks", ) + OUTPUT_TOOLTIPS = ("The modified diffusion model.",) + FUNCTION = "load_lora" + + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "Select individual block alpha values, value of 0 removes the block altogether" + + def load_lora(self, **kwargs): + return (kwargs,) + +class DiTBlockLoraLoader: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}), + "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. 
This value can be negative."}), + + }, + "optional": { + "lora_name": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}), + "opt_lora_path": ("STRING", {"forceInput": True, "tooltip": "Absolute path of the LoRA."}), + "blocks": ("SELECTEDDITBLOCKS",), + } + } + + RETURN_TYPES = ("MODEL", "STRING", ) + RETURN_NAMES = ("model", "rank", ) + OUTPUT_TOOLTIPS = ("The modified diffusion model.", "possible rank of the LoRA.") + FUNCTION = "load_lora" + CATEGORY = "KJNodes/experimental" + + def load_lora(self, model, strength_model, lora_name=None, opt_lora_path=None, blocks=None): + + import comfy.lora + + if opt_lora_path: + lora_path = opt_lora_path + else: + lora_path = folder_paths.get_full_path("loras", lora_name) + + lora = None + if self.loaded_lora is not None: + if self.loaded_lora[0] == lora_path: + lora = self.loaded_lora[1] + else: + self.loaded_lora = None + + if lora is None: + lora = load_torch_file(lora_path, safe_load=True) + self.loaded_lora = (lora_path, lora) + + # Find the first key that ends with "weight" + rank = "unknown" + weight_key = next((key for key in lora.keys() if key.endswith('weight')), None) + # Print the shape of the value corresponding to the key + if weight_key: + print(f"Shape of the first 'weight' key ({weight_key}): {lora[weight_key].shape}") + rank = str(lora[weight_key].shape[0]) + else: + print("No key ending with 'weight' found.") + rank = "Couldn't find rank" + self.loaded_lora = (lora_path, lora) + + key_map = {} + if model is not None: + key_map = comfy.lora.model_lora_keys_unet(model.model, key_map) + + loaded = comfy.lora.load_lora(lora, key_map) + + if blocks is not None: + keys_to_delete = [] + + for block in blocks: + for key in list(loaded.keys()): + match = False + if isinstance(key, str) and block in key: + match = True + elif isinstance(key, tuple): + for k in key: + if block in k: + match = True + break + + if match: + ratio = blocks[block] + if ratio == 0: + keys_to_delete.append(key) + else: + # Only modify LoRA adapters, skip diff tuples + value = loaded[key] + if hasattr(value, 'weights'): + print(f"Modifying LoRA adapter for key: {key}") + weights_list = list(value.weights) + weights_list[2] = ratio + loaded[key].weights = tuple(weights_list) + else: + print(f"Skipping non-LoRA entry for key: {key}") + + for key in keys_to_delete: + del loaded[key] + + print("loading lora keys:") + for key, value in loaded.items(): + if hasattr(value, 'weights'): + print(f"Key: {key}, Alpha: {value.weights[2]}") + else: + print(f"Key: {key}, Type: {type(value)}") + + if model is not None: + new_modelpatcher = model.clone() + k = new_modelpatcher.add_patches(loaded, strength_model) + + k = set(k) + for x in loaded: + if (x not in k): + print("NOT LOADED {}".format(x)) + + return (new_modelpatcher, rank) + +class CustomControlNetWeightsFluxFromList: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "list_of_floats": ("FLOAT", {"forceInput": True}, ), + }, + "optional": { + "uncond_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}, ), + "cn_extras": ("CN_WEIGHTS_EXTRAS",), + "autosize": ("ACNAUTOSIZE", {"padding": 0}), + } + } + + RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) + RETURN_NAMES = ("CN_WEIGHTS", "TK_SHORTCUT") + FUNCTION = "load_weights" + DESCRIPTION = "Creates controlnet weights from a list of floats for Advanced-ControlNet" + + CATEGORY = "KJNodes/controlnet" + + def load_weights(self, list_of_floats: list[float], + uncond_multiplier: float=1.0, 
cn_extras: dict[str]={}): + + adv_control = importlib.import_module("ComfyUI-Advanced-ControlNet.adv_control") + ControlWeights = adv_control.utils.ControlWeights + TimestepKeyframeGroup = adv_control.utils.TimestepKeyframeGroup + TimestepKeyframe = adv_control.utils.TimestepKeyframe + + weights = ControlWeights.controlnet(weights_input=list_of_floats, uncond_multiplier=uncond_multiplier, extras=cn_extras) + print(weights.weights_input) + return (weights, TimestepKeyframeGroup.default(TimestepKeyframe(control_weights=weights))) + +SHAKKERLABS_UNION_CONTROLNET_TYPES = { + "canny": 0, + "tile": 1, + "depth": 2, + "blur": 3, + "pose": 4, + "gray": 5, + "low quality": 6, +} + +class SetShakkerLabsUnionControlNetType: + @classmethod + def INPUT_TYPES(s): + return {"required": {"control_net": ("CONTROL_NET", ), + "type": (["auto"] + list(SHAKKERLABS_UNION_CONTROLNET_TYPES.keys()),) + }} + + CATEGORY = "conditioning/controlnet" + RETURN_TYPES = ("CONTROL_NET",) + + FUNCTION = "set_controlnet_type" + + def set_controlnet_type(self, control_net, type): + control_net = control_net.copy() + type_number = SHAKKERLABS_UNION_CONTROLNET_TYPES.get(type, -1) + if type_number >= 0: + control_net.set_extra_arg("control_type", [type_number]) + else: + control_net.set_extra_arg("control_type", []) + + return (control_net,) + +class ModelSaveKJ: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "filename_prefix": ("STRING", {"default": "diffusion_models/ComfyUI"}), + "model_key_prefix": ("STRING", {"default": "model.diffusion_model."}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},} + RETURN_TYPES = () + FUNCTION = "save" + OUTPUT_NODE = True + + CATEGORY = "advanced/model_merging" + + def save(self, model, filename_prefix, model_key_prefix, prompt=None, extra_pnginfo=None): + from comfy.utils import save_torch_file + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + + output_checkpoint = f"{filename}_{counter:05}_.safetensors" + output_checkpoint = os.path.join(full_output_folder, output_checkpoint) + + load_models = [model] + + model_management.load_models_gpu(load_models, force_patch_weights=True) + default_prefix = "model.diffusion_model." 
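+        # The saved keys come out prefixed with "model.diffusion_model."; remap that
+        # prefix to the user-supplied model_key_prefix below so the file matches the
+        # loader it is intended for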
+ + sd = model.model.state_dict_for_saving(None, None, None) + + new_sd = {} + for k in sd: + if k.startswith(default_prefix): + new_key = model_key_prefix + k[len(default_prefix):] + else: + new_key = k # In case the key doesn't start with the default prefix, keep it unchanged + t = sd[k] + if not t.is_contiguous(): + t = t.contiguous() + new_sd[new_key] = t + print(full_output_folder) + if not os.path.exists(full_output_folder): + os.makedirs(full_output_folder) + save_torch_file(new_sd, os.path.join(full_output_folder, output_checkpoint)) + return {} + +class StyleModelApplyAdvanced: + @classmethod + def INPUT_TYPES(s): + return {"required": {"conditioning": ("CONDITIONING", ), + "style_model": ("STYLE_MODEL", ), + "clip_vision_output": ("CLIP_VISION_OUTPUT", ), + "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.001}), + }} + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "apply_stylemodel" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "StyleModelApply but with strength parameter" + + def apply_stylemodel(self, clip_vision_output, style_model, conditioning, strength=1.0): + cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0) + cond = strength * cond + c = [] + for t in conditioning: + n = [torch.cat((t[0], cond), dim=1), t[1].copy()] + c.append(n) + return (c, ) + +class AudioConcatenate: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "audio1": ("AUDIO",), + "audio2": ("AUDIO",), + "direction": ( + [ 'right', + 'left', + ], + { + "default": 'right' + }), + }} + + RETURN_TYPES = ("AUDIO",) + FUNCTION = "concanate" + CATEGORY = "KJNodes/audio" + DESCRIPTION = """ +Concatenates the audio1 to audio2 in the specified direction. +""" + + def concanate(self, audio1, audio2, direction): + sample_rate_1 = audio1["sample_rate"] + sample_rate_2 = audio2["sample_rate"] + if sample_rate_1 != sample_rate_2: + raise Exception("Sample rates of the two audios do not match") + + waveform_1 = audio1["waveform"] + print(waveform_1.shape) + waveform_2 = audio2["waveform"] + + # Concatenate based on the specified direction + if direction == 'right': + concatenated_audio = torch.cat((waveform_1, waveform_2), dim=2) # Concatenate along width + elif direction == 'left': + concatenated_audio= torch.cat((waveform_2, waveform_1), dim=2) # Concatenate along width + return ({"waveform": concatenated_audio, "sample_rate": sample_rate_1},) + +class LeapfusionHunyuanI2V: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "latent": ("LATENT",), + "index": ("INT", {"default": 0, "min": -1, "max": 1000, "step": 1,"tooltip": "The index of the latent to be replaced. 
0 for first frame and -1 for last"}), + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The start percentage of steps to apply"}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The end percentage of steps to apply"}), + "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.001}), + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/experimental" + + def patch(self, model, latent, index, strength, start_percent, end_percent): + + def outer_wrapper(samples, index, start_percent, end_percent): + def unet_wrapper(apply_model, args): + steps = args["c"]["transformer_options"]["sample_sigmas"] + inp, timestep, c = args["input"], args["timestep"], args["c"] + matched_step_index = (steps == timestep).nonzero() + if len(matched_step_index) > 0: + current_step_index = matched_step_index.item() + else: + for i in range(len(steps) - 1): + # walk from beginning of steps until crossing the timestep + if (steps[i] - timestep[0]) * (steps[i + 1] - timestep[0]) <= 0: + current_step_index = i + break + else: + current_step_index = 0 + current_percent = current_step_index / (len(steps) - 1) + if samples is not None: + if start_percent <= current_percent <= end_percent: + inp[:, :, [index], :, :] = samples[:, :, [0], :, :].to(inp) + else: + inp[:, :, [index], :, :] = torch.zeros(1) + return apply_model(inp, timestep, **c) + return unet_wrapper + + samples = latent["samples"] * 0.476986 * strength + m = model.clone() + m.set_model_unet_function_wrapper(outer_wrapper(samples, index, start_percent, end_percent)) + + return (m,) + +class ImageNoiseAugmentation: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "noise_aug_strength": ("FLOAT", {"default": None, "min": 0.0, "max": 100.0, "step": 0.001}), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "add_noise" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ + Add noise to an image. 
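+    noise_aug_strength scales the Gaussian noise added to each image; seed makes the noise deterministic.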
+ """ + + def add_noise(self, image, noise_aug_strength, seed): + torch.manual_seed(seed) + sigma = torch.ones((image.shape[0],)).to(image.device, image.dtype) * noise_aug_strength + image_noise = torch.randn_like(image) * sigma[:, None, None, None] + image_noise = torch.where(image==-1, torch.zeros_like(image), image_noise) + image_out = image + image_noise + return image_out, + +class VAELoaderKJ: + @staticmethod + def vae_list(): + vaes = folder_paths.get_filename_list("vae") + approx_vaes = folder_paths.get_filename_list("vae_approx") + sdxl_taesd_enc = False + sdxl_taesd_dec = False + sd1_taesd_enc = False + sd1_taesd_dec = False + sd3_taesd_enc = False + sd3_taesd_dec = False + f1_taesd_enc = False + f1_taesd_dec = False + + for v in approx_vaes: + if v.startswith("taesd_decoder."): + sd1_taesd_dec = True + elif v.startswith("taesd_encoder."): + sd1_taesd_enc = True + elif v.startswith("taesdxl_decoder."): + sdxl_taesd_dec = True + elif v.startswith("taesdxl_encoder."): + sdxl_taesd_enc = True + elif v.startswith("taesd3_decoder."): + sd3_taesd_dec = True + elif v.startswith("taesd3_encoder."): + sd3_taesd_enc = True + elif v.startswith("taef1_encoder."): + f1_taesd_dec = True + elif v.startswith("taef1_decoder."): + f1_taesd_enc = True + if sd1_taesd_dec and sd1_taesd_enc: + vaes.append("taesd") + if sdxl_taesd_dec and sdxl_taesd_enc: + vaes.append("taesdxl") + if sd3_taesd_dec and sd3_taesd_enc: + vaes.append("taesd3") + if f1_taesd_dec and f1_taesd_enc: + vaes.append("taef1") + return vaes + + @staticmethod + def load_taesd(name): + sd = {} + approx_vaes = folder_paths.get_filename_list("vae_approx") + + encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes)) + decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes)) + + enc = load_torch_file(folder_paths.get_full_path_or_raise("vae_approx", encoder)) + for k in enc: + sd["taesd_encoder.{}".format(k)] = enc[k] + + dec = load_torch_file(folder_paths.get_full_path_or_raise("vae_approx", decoder)) + for k in dec: + sd["taesd_decoder.{}".format(k)] = dec[k] + + if name == "taesd": + sd["vae_scale"] = torch.tensor(0.18215) + sd["vae_shift"] = torch.tensor(0.0) + elif name == "taesdxl": + sd["vae_scale"] = torch.tensor(0.13025) + sd["vae_shift"] = torch.tensor(0.0) + elif name == "taesd3": + sd["vae_scale"] = torch.tensor(1.5305) + sd["vae_shift"] = torch.tensor(0.0609) + elif name == "taef1": + sd["vae_scale"] = torch.tensor(0.3611) + sd["vae_shift"] = torch.tensor(0.1159) + return sd + + @classmethod + def INPUT_TYPES(s): + return { + "required": { "vae_name": (s.vae_list(), ), + "device": (["main_device", "cpu"],), + "weight_dtype": (["bf16", "fp16", "fp32" ],), + } + } + + RETURN_TYPES = ("VAE",) + FUNCTION = "load_vae" + CATEGORY = "KJNodes/vae" + + def load_vae(self, vae_name, device, weight_dtype): + from comfy.sd import VAE + dtype = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[weight_dtype] + if device == "main_device": + device = model_management.get_torch_device() + elif device == "cpu": + device = torch.device("cpu") + if vae_name in ["taesd", "taesdxl", "taesd3", "taef1"]: + sd = self.load_taesd(vae_name) + else: + vae_path = folder_paths.get_full_path_or_raise("vae", vae_name) + sd = load_torch_file(vae_path) + vae = VAE(sd=sd, device=device, dtype=dtype) + return (vae,) + +from comfy.samplers import sampling_function, CFGGuider +class Guider_ScheduledCFG(CFGGuider): + + def set_cfg(self, cfg, start_percent, end_percent): + self.cfg = 
cfg + self.start_percent = start_percent + self.end_percent = end_percent + + def predict_noise(self, x, timestep, model_options={}, seed=None): + steps = model_options["transformer_options"]["sample_sigmas"] + matched_step_index = (steps == timestep).nonzero() + assert not (isinstance(self.cfg, list) and len(self.cfg) != (len(steps) - 1)), "cfg list length must match step count" + if len(matched_step_index) > 0: + current_step_index = matched_step_index.item() + else: + for i in range(len(steps) - 1): + # walk from beginning of steps until crossing the timestep + if (steps[i] - timestep[0]) * (steps[i + 1] - timestep[0]) <= 0: + current_step_index = i + break + else: + current_step_index = 0 + current_percent = current_step_index / (len(steps) - 1) + + if self.start_percent <= current_percent <= self.end_percent: + if isinstance(self.cfg, list): + cfg = self.cfg[current_step_index] + else: + cfg = self.cfg + uncond = self.conds.get("negative", None) + else: + uncond = None + cfg = 1.0 + + return sampling_function(self.inner_model, x, timestep, uncond, self.conds.get("positive", None), cfg, model_options=model_options, seed=seed) + +class ScheduledCFGGuidance: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "cfg": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 100.0, "step": 0.01}), + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step":0.01}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step":0.01}), + }, + } + RETURN_TYPES = ("GUIDER",) + FUNCTION = "get_guider" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +CFG Guider that allows scheduled CFG changes over steps; steps outside the range use CFG 1.0 and are therefore processed faster. +The cfg input can be a list of floats matching the step count, or a single float used for all steps.
+""" + + def get_guider(self, model, cfg, positive, negative, start_percent, end_percent): + guider = Guider_ScheduledCFG(model) + guider.set_conds(positive, negative) + guider.set_cfg(cfg, start_percent, end_percent) + return (guider, ) + + +class ApplyRifleXRoPE_WanVideo: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "latent": ("LATENT", {"tooltip": "Only used to get the latent count"}), + "k": ("INT", {"default": 6, "min": 1, "max": 100, "step": 1, "tooltip": "Index of intrinsic frequency"}), + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + CATEGORY = "KJNodes/experimental" + EXPERIMENTAL = True + DESCRIPTION = "Extends the potential frame count of HunyuanVideo using this method: https://github.com/thu-ml/RIFLEx" + + def patch(self, model, latent, k): + model_class = model.model.diffusion_model + + model_clone = model.clone() + num_frames = latent["samples"].shape[2] + d = model_class.dim // model_class.num_heads + + rope_embedder = EmbedND_RifleX( + d, + 10000.0, + [d - 4 * (d // 6), 2 * (d // 6), 2 * (d // 6)], + num_frames, + k + ) + + model_clone.add_object_patch(f"diffusion_model.rope_embedder", rope_embedder) + + return (model_clone, ) + +class ApplyRifleXRoPE_HunuyanVideo: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "latent": ("LATENT", {"tooltip": "Only used to get the latent count"}), + "k": ("INT", {"default": 4, "min": 1, "max": 100, "step": 1, "tooltip": "Index of intrinsic frequency"}), + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + CATEGORY = "KJNodes/experimental" + EXPERIMENTAL = True + DESCRIPTION = "Extends the potential frame count of HunyuanVideo using this method: https://github.com/thu-ml/RIFLEx" + + def patch(self, model, latent, k): + model_class = model.model.diffusion_model + + model_clone = model.clone() + num_frames = latent["samples"].shape[2] + + pe_embedder = EmbedND_RifleX( + model_class.params.hidden_size // model_class.params.num_heads, + model_class.params.theta, + model_class.params.axes_dim, + num_frames, + k + ) + + model_clone.add_object_patch(f"diffusion_model.pe_embedder", pe_embedder) + + return (model_clone, ) + +def rope_riflex(pos, dim, theta, L_test, k): + from einops import rearrange + assert dim % 2 == 0 + if model_management.is_device_mps(pos.device) or model_management.is_intel_xpu() or model_management.is_directml_enabled(): + device = torch.device("cpu") + else: + device = pos.device + + scale = torch.linspace(0, (dim - 2) / dim, steps=dim//2, dtype=torch.float64, device=device) + omega = 1.0 / (theta**scale) + + # RIFLEX modification - adjust last frequency component if L_test and k are provided + if k and L_test: + omega[k-1] = 0.9 * 2 * torch.pi / L_test + + out = torch.einsum("...n,d->...nd", pos.to(dtype=torch.float32, device=device), omega) + out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1) + out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2) + return out.to(dtype=torch.float32, device=pos.device) + +class EmbedND_RifleX(nn.Module): + def __init__(self, dim, theta, axes_dim, num_frames, k): + super().__init__() + self.dim = dim + self.theta = theta + self.axes_dim = axes_dim + self.num_frames = num_frames + self.k = k + + def forward(self, ids): + n_axes = ids.shape[-1] + emb = torch.cat( + [rope_riflex(ids[..., i], self.axes_dim[i], self.theta, self.num_frames, self.k if i == 0 else 0) for i in range(n_axes)], + dim=-3, + ) + return emb.unsqueeze(1) + + +class 
Timer: + def __init__(self, name): + self.name = name + self.start_time = None + self.elapsed = 0 + +class TimerNodeKJ: + @classmethod + + def INPUT_TYPES(s): + return { + "required": { + "any_input": (IO.ANY, ), + "mode": (["start", "stop"],), + "name": ("STRING", {"default": "Timer"}), + }, + "optional": { + "timer": ("TIMER",), + }, + } + + RETURN_TYPES = (IO.ANY, "TIMER", "INT", ) + RETURN_NAMES = ("any_output", "timer", "time") + FUNCTION = "timer" + CATEGORY = "KJNodes/misc" + + def timer(self, mode, name, any_input=None, timer=None): + if timer is None: + if mode == "start": + timer = Timer(name=name) + timer.start_time = time.time() + return {"ui": { + "text": [f"{timer.start_time}"]}, + "result": (any_input, timer, 0) + } + elif mode == "stop" and timer is not None: + end_time = time.time() + timer.elapsed = int((end_time - timer.start_time) * 1000) + timer.start_time = None + return (any_input, timer, timer.elapsed) + +class HunyuanVideoEncodeKeyframesToCond: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "positive": ("CONDITIONING", ), + "vae": ("VAE", ), + "start_frame": ("IMAGE", ), + "end_frame": ("IMAGE", ), + "num_frames": ("INT", {"default": 33, "min": 2, "max": 4096, "step": 1}), + "tile_size": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}), + "overlap": ("INT", {"default": 64, "min": 0, "max": 4096, "step": 32}), + "temporal_size": ("INT", {"default": 64, "min": 8, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to encode at a time."}), + "temporal_overlap": ("INT", {"default": 8, "min": 4, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to overlap."}), + }, + "optional": { + "negative": ("CONDITIONING", ), + } + } + + RETURN_TYPES = ("MODEL", "CONDITIONING","CONDITIONING","LATENT") + RETURN_NAMES = ("model", "positive", "negative", "latent") + FUNCTION = "encode" + + CATEGORY = "KJNodes/videomodels" + + def encode(self, model, positive, start_frame, end_frame, num_frames, vae, tile_size, overlap, temporal_size, temporal_overlap, negative=None): + + model_clone = model.clone() + + model_clone.add_object_patch("concat_keys", ("concat_image",)) + + + x = (start_frame.shape[1] // 8) * 8 + y = (start_frame.shape[2] // 8) * 8 + + if start_frame.shape[1] != x or start_frame.shape[2] != y: + x_offset = (start_frame.shape[1] % 8) // 2 + y_offset = (start_frame.shape[2] % 8) // 2 + start_frame = start_frame[:,x_offset:x + x_offset, y_offset:y + y_offset,:] + if end_frame.shape[1] != x or end_frame.shape[2] != y: + x_offset = (start_frame.shape[1] % 8) // 2 + y_offset = (start_frame.shape[2] % 8) // 2 + end_frame = end_frame[:,x_offset:x + x_offset, y_offset:y + y_offset,:] + + video_frames = torch.zeros(num_frames-2, start_frame.shape[1], start_frame.shape[2], start_frame.shape[3], device=start_frame.device, dtype=start_frame.dtype) + video_frames = torch.cat([start_frame, video_frames, end_frame], dim=0) + + concat_latent = vae.encode_tiled(video_frames[:,:,:,:3], tile_x=tile_size, tile_y=tile_size, overlap=overlap, tile_t=temporal_size, overlap_t=temporal_overlap) + + out_latent = {} + out_latent["samples"] = torch.zeros_like(concat_latent) + + out = [] + for conditioning in [positive, negative if negative is not None else []]: + c = [] + for t in conditioning: + d = t[1].copy() + d["concat_latent_image"] = concat_latent + n = [t[0], d] + c.append(n) + out.append(c) + if len(out) == 1: + out.append(out[0]) + return (model_clone, out[0], out[1], out_latent) + + 
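+# Illustrative sketch (not used by the nodes above): HunyuanVideoEncodeKeyframesToCond crops both
+# keyframes to the largest height/width divisible by 8 before VAE encoding, trimming an equal
+# margin from each side. A minimal standalone version of that crop, assuming a ComfyUI-style
+# image tensor of shape [B, H, W, C]; the helper name is hypothetical and for illustration only.
+def _center_crop_to_multiple_of_8(frame):
+    h = (frame.shape[1] // 8) * 8
+    w = (frame.shape[2] // 8) * 8
+    h_off = (frame.shape[1] % 8) // 2
+    w_off = (frame.shape[2] % 8) // 2
+    # Slice so that the remainder pixels are split roughly evenly between both sides.
+    return frame[:, h_off:h + h_off, w_off:w + w_off, :]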
+class LazySwitchKJ: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "switch": ("BOOLEAN",), + "on_false": (IO.ANY, {"lazy": True}), + "on_true": (IO.ANY, {"lazy": True}), + }, + } + + RETURN_TYPES = (IO.ANY,) + FUNCTION = "switch" + CATEGORY = "KJNodes/misc" + DESCRIPTION = "Controls flow of execution based on a boolean switch." + + def check_lazy_status(self, switch, on_false=None, on_true=None): + if switch and on_true is None: + return ["on_true"] + if not switch and on_false is None: + return ["on_false"] + + def switch(self, switch, on_false = None, on_true=None): + value = on_true if switch else on_false + return (value,) \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/pyproject.toml b/custom_nodes/comfyui-kjnodes/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..f2d41e3eb46617cd61cdbd6138f191ef92f38236 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "comfyui-kjnodes" +description = "Various quality of life -nodes for ComfyUI, mostly just visual stuff to improve usability." +version = "1.1.4" +license = {file = "LICENSE"} +dependencies = ["librosa", "numpy", "pillow>=10.3.0", "scipy", "color-matcher", "matplotlib", "huggingface_hub"] + +[project.urls] +Repository = "https://github.com/kijai/ComfyUI-KJNodes" +# Used by Comfy Registry https://comfyregistry.org + +[tool.comfy] +PublisherId = "kijai" +DisplayName = "ComfyUI-KJNodes" +Icon = "https://avatars.githubusercontent.com/u/40791699" diff --git a/custom_nodes/comfyui-kjnodes/requirements.txt b/custom_nodes/comfyui-kjnodes/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..5bc18ca95b226298cbb88bd4de3307c157e0a88b --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/requirements.txt @@ -0,0 +1,7 @@ +pillow>=10.3.0 +scipy +color-matcher +matplotlib +huggingface_hub +mss +opencv-python \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/utility/__pycache__/utility.cpython-310.pyc b/custom_nodes/comfyui-kjnodes/utility/__pycache__/utility.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..536023f264b00aaff0aa103f593a0e271322e388 Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/utility/__pycache__/utility.cpython-310.pyc differ diff --git a/custom_nodes/comfyui-kjnodes/utility/fluid.py b/custom_nodes/comfyui-kjnodes/utility/fluid.py new file mode 100644 index 0000000000000000000000000000000000000000..c0691987f5249a031ecbb74329ba513d5788b691 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/utility/fluid.py @@ -0,0 +1,67 @@ +import numpy as np +from scipy.ndimage import map_coordinates, spline_filter +from scipy.sparse.linalg import factorized + +from .numerical import difference, operator + + +class Fluid: + def __init__(self, shape, *quantities, pressure_order=1, advect_order=3): + self.shape = shape + self.dimensions = len(shape) + + # Prototyping is simplified by dynamically + # creating advected quantities as needed. + self.quantities = quantities + for q in quantities: + setattr(self, q, np.zeros(shape)) + + self.indices = np.indices(shape) + self.velocity = np.zeros((self.dimensions, *shape)) + + laplacian = operator(shape, difference(2, pressure_order)) + self.pressure_solver = factorized(laplacian) + + self.advect_order = advect_order + + def step(self): + # Advection is computed backwards in time as described in Stable Fluids. 
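+        # Semi-Lagrangian step: instead of pushing values forward, each grid point samples the
+        # field at the location a particle would have arrived from, i.e. its own index minus
+        # the local velocity.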
+ advection_map = self.indices - self.velocity + + # SciPy's spline filter introduces checkerboard divergence. + # A linear blend of the filtered and unfiltered fields based + # on some value epsilon eliminates this error. + def advect(field, filter_epsilon=10e-2, mode='constant'): + filtered = spline_filter(field, order=self.advect_order, mode=mode) + field = filtered * (1 - filter_epsilon) + field * filter_epsilon + return map_coordinates(field, advection_map, prefilter=False, order=self.advect_order, mode=mode) + + # Apply advection to each axis of the + # velocity field and each user-defined quantity. + for d in range(self.dimensions): + self.velocity[d] = advect(self.velocity[d]) + + for q in self.quantities: + setattr(self, q, advect(getattr(self, q))) + + # Compute the jacobian at each point in the + # velocity field to extract curl and divergence. + jacobian_shape = (self.dimensions,) * 2 + partials = tuple(np.gradient(d) for d in self.velocity) + jacobian = np.stack(partials).reshape(*jacobian_shape, *self.shape) + + divergence = jacobian.trace() + + # If this curl calculation is extended to 3D, the y-axis value must be negated. + # This corresponds to the coefficients of the levi-civita symbol in that dimension. + # Higher dimensions do not have a vector -> scalar, or vector -> vector, + # correspondence between velocity and curl due to differing isomorphisms + # between exterior powers in dimensions != 2 or 3 respectively. + curl_mask = np.triu(np.ones(jacobian_shape, dtype=bool), k=1) + curl = (jacobian[curl_mask] - jacobian[curl_mask.T]).squeeze() + + # Apply the pressure correction to the fluid's velocity field. + pressure = self.pressure_solver(divergence.flatten()).reshape(self.shape) + self.velocity -= np.gradient(pressure) + + return divergence, curl, pressure \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/utility/magictex.py b/custom_nodes/comfyui-kjnodes/utility/magictex.py new file mode 100644 index 0000000000000000000000000000000000000000..e6d426f7deb3deb977604dd37581eb4e9fe9e6a9 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/utility/magictex.py @@ -0,0 +1,95 @@ +"""Generates psychedelic color textures in the spirit of Blender's magic texture shader using Python/Numpy + +https://github.com/cheind/magic-texture +""" +from typing import Tuple, Optional +import numpy as np + + +def coordinate_grid(shape: Tuple[int, int], dtype=np.float32): + """Returns a three-dimensional coordinate grid of given shape for use in `magic`.""" + x = np.linspace(-1, 1, shape[1], endpoint=True, dtype=dtype) + y = np.linspace(-1, 1, shape[0], endpoint=True, dtype=dtype) + X, Y = np.meshgrid(x, y) + XYZ = np.stack((X, Y, np.ones_like(X)), -1) + return XYZ + + +def random_transform(coords: np.ndarray, rng: np.random.Generator = None): + """Returns randomly transformed coordinates""" + H, W = coords.shape[:2] + rng = rng or np.random.default_rng() + m = rng.uniform(-1.0, 1.0, size=(3, 3)).astype(coords.dtype) + return (coords.reshape(-1, 3) @ m.T).reshape(H, W, 3) + + +def magic( + coords: np.ndarray, + depth: Optional[int] = None, + distortion: Optional[int] = None, + rng: np.random.Generator = None, +): + """Returns color magic color texture. + + The implementation is based on Blender's (https://www.blender.org/) magic + texture shader. The following adaptions have been made: + - we exchange the nested if-cascade by a probabilistic iterative approach + + Kwargs + ------ + coords: HxWx3 array + Coordinates transformed into colors by this method. 
See + `magictex.coordinate_grid` to generate the default. + depth: int (optional) + Number of transformations applied. Higher numbers lead to more + nested patterns. If not specified, randomly sampled. + distortion: float (optional) + Distortion of patterns. Larger values indicate more distortion, + lower values tend to generate smoother patterns. If not specified, + randomly sampled. + rng: np.random.Generator + Optional random generator to draw samples from. + + Returns + ------- + colors: HxWx3 array + Three channel color image in range [0,1] + """ + rng = rng or np.random.default_rng() + if distortion is None: + distortion = rng.uniform(1, 4) + if depth is None: + depth = rng.integers(1, 5) + + H, W = coords.shape[:2] + XYZ = coords + x = np.sin((XYZ[..., 0] + XYZ[..., 1] + XYZ[..., 2]) * distortion) + y = np.cos((-XYZ[..., 0] + XYZ[..., 1] - XYZ[..., 2]) * distortion) + z = -np.cos((-XYZ[..., 0] - XYZ[..., 1] + XYZ[..., 2]) * distortion) + + if depth > 0: + x *= distortion + y *= distortion + z *= distortion + y = -np.cos(x - y + z) + y *= distortion + + xyz = [x, y, z] + fns = [np.cos, np.sin] + for _ in range(1, depth): + axis = rng.choice(3) + fn = fns[rng.choice(2)] + signs = rng.binomial(n=1, p=0.5, size=4) * 2 - 1 + + xyz[axis] = signs[-1] * fn( + signs[0] * xyz[0] + signs[1] * xyz[1] + signs[2] * xyz[2] + ) + xyz[axis] *= distortion + + x, y, z = xyz + x /= 2 * distortion + y /= 2 * distortion + z /= 2 * distortion + c = 0.5 - np.stack((x, y, z), -1) + np.clip(c, 0, 1.0) + return c \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/utility/numerical.py b/custom_nodes/comfyui-kjnodes/utility/numerical.py new file mode 100644 index 0000000000000000000000000000000000000000..b5b88bc63c45d63d8913e56cbd06eb7ab413fe4f --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/utility/numerical.py @@ -0,0 +1,25 @@ +from functools import reduce +from itertools import cycle +from math import factorial + +import numpy as np +import scipy.sparse as sp + + +def difference(derivative, accuracy=1): + # Central differences implemented based on the article here: + # http://web.media.mit.edu/~crtaylor/calculator.html + derivative += 1 + radius = accuracy + derivative // 2 - 1 + points = range(-radius, radius + 1) + coefficients = np.linalg.inv(np.vander(points)) + return coefficients[-derivative] * factorial(derivative - 1), points + + +def operator(shape, *differences): + # Credit to Philip Zucker for figuring out + # that kronsum's argument order is reversed. + # Without that bit of wisdom I'd have lost it. 
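+    # Builds an N-dimensional finite-difference operator as a Kronecker sum of per-axis 1-D
+    # difference matrices, returned as a sparse CSC matrix. The Fluid class above uses
+    # operator(shape, difference(2, pressure_order)) to obtain the Laplacian it factorizes
+    # for the pressure solve.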
+ differences = zip(shape, cycle(differences)) + factors = (sp.diags(*diff, shape=(dim,) * 2) for dim, diff in differences) + return reduce(lambda a, f: sp.kronsum(f, a, format='csc'), factors) \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/utility/utility.py b/custom_nodes/comfyui-kjnodes/utility/utility.py new file mode 100644 index 0000000000000000000000000000000000000000..f3b5c425922784522791e33c225c29be1e8249e0 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/utility/utility.py @@ -0,0 +1,39 @@ +import torch +import numpy as np +from PIL import Image +from typing import Union, List + +# Utility functions from mtb nodes: https://github.com/melMass/comfy_mtb +def pil2tensor(image: Union[Image.Image, List[Image.Image]]) -> torch.Tensor: + if isinstance(image, list): + return torch.cat([pil2tensor(img) for img in image], dim=0) + + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + + +def np2tensor(img_np: Union[np.ndarray, List[np.ndarray]]) -> torch.Tensor: + if isinstance(img_np, list): + return torch.cat([np2tensor(img) for img in img_np], dim=0) + + return torch.from_numpy(img_np.astype(np.float32) / 255.0).unsqueeze(0) + + +def tensor2np(tensor: torch.Tensor): + if len(tensor.shape) == 3: # Single image + return np.clip(255.0 * tensor.cpu().numpy(), 0, 255).astype(np.uint8) + else: # Batch of images + return [np.clip(255.0 * t.cpu().numpy(), 0, 255).astype(np.uint8) for t in tensor] + +def tensor2pil(image: torch.Tensor) -> List[Image.Image]: + batch_count = image.size(0) if len(image.shape) > 3 else 1 + if batch_count > 1: + out = [] + for i in range(batch_count): + out.extend(tensor2pil(image[i])) + return out + + return [ + Image.fromarray( + np.clip(255.0 * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + ) + ] \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/web/green.png b/custom_nodes/comfyui-kjnodes/web/green.png new file mode 100644 index 0000000000000000000000000000000000000000..900964e4b3907145fe1e75a5b58473567450e16d Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/web/green.png differ diff --git a/custom_nodes/comfyui-kjnodes/web/js/appearance.js b/custom_nodes/comfyui-kjnodes/web/js/appearance.js new file mode 100644 index 0000000000000000000000000000000000000000..d90b4aa34d4c52b22a4411194100972c83eed88d --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/web/js/appearance.js @@ -0,0 +1,23 @@ +import { app } from "../../../scripts/app.js"; + +app.registerExtension({ + name: "KJNodes.appearance", + nodeCreated(node) { + switch (node.comfyClass) { + case "INTConstant": + node.setSize([200, 58]); + node.color = "#1b4669"; + node.bgcolor = "#29699c"; + break; + case "FloatConstant": + node.setSize([200, 58]); + node.color = LGraphCanvas.node_colors.green.color; + node.bgcolor = LGraphCanvas.node_colors.green.bgcolor; + break; + case "ConditioningMultiCombine": + node.color = LGraphCanvas.node_colors.brown.color; + node.bgcolor = LGraphCanvas.node_colors.brown.bgcolor; + break; + } + } +}); diff --git a/custom_nodes/comfyui-kjnodes/web/js/browserstatus.js b/custom_nodes/comfyui-kjnodes/web/js/browserstatus.js new file mode 100644 index 0000000000000000000000000000000000000000..45abafb163481d9d760c8b273aad4d2a00db1e92 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/web/js/browserstatus.js @@ -0,0 +1,55 @@ +import { api } from "../../../scripts/api.js"; +import { app } from "../../../scripts/app.js"; + +app.registerExtension({ + name: "KJNodes.browserstatus", + setup() { + if 
(!app.ui.settings.getSettingValue("KJNodes.browserStatus")) { + return; + } + api.addEventListener("status", ({ detail }) => { + let title = "ComfyUI"; + let favicon = "green"; + let queueRemaining = detail && detail.exec_info.queue_remaining; + + if (queueRemaining) { + favicon = "red"; + title = `00% - ${queueRemaining} | ${title}`; + } + let link = document.querySelector("link[rel~='icon']"); + if (!link) { + link = document.createElement("link"); + link.rel = "icon"; + document.head.appendChild(link); + } + link.href = new URL(`../${favicon}.png`, import.meta.url); + document.title = title; + }); + //add progress to the title + api.addEventListener("progress", ({ detail }) => { + const { value, max } = detail; + const progress = Math.floor((value / max) * 100); + let title = document.title; + + if (!isNaN(progress) && progress >= 0 && progress <= 100) { + const paddedProgress = String(progress).padStart(2, '0'); + title = `${paddedProgress}% ${title.replace(/^\d+%\s/, '')}`; + } + document.title = title; + }); + }, + init() { + if (!app.ui.settings.getSettingValue("KJNodes.browserStatus")) { + return; + } + const pythongossFeed = app.extensions.find( + (e) => e.name === 'pysssss.FaviconStatus', + ) + if (pythongossFeed) { + console.warn("KJNodes - Overriding pysssss.FaviconStatus") + pythongossFeed.setup = function() { + console.warn("Disabled by KJNodes") + }; + } + }, +}); \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/web/js/contextmenu.js b/custom_nodes/comfyui-kjnodes/web/js/contextmenu.js new file mode 100644 index 0000000000000000000000000000000000000000..8485658ef819722280160b124a2acf39353bb96d --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/web/js/contextmenu.js @@ -0,0 +1,147 @@ +import { app } from "../../../scripts/app.js"; + +// Adds context menu entries, code partly from pyssssscustom-scripts + +function addMenuHandler(nodeType, cb) { + const getOpts = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function () { + const r = getOpts.apply(this, arguments); + cb.apply(this, arguments); + return r; + }; +} + +function addNode(name, nextTo, options) { + console.log("name:", name); + console.log("nextTo:", nextTo); + options = { side: "left", select: true, shiftY: 0, shiftX: 0, ...(options || {}) }; + const node = LiteGraph.createNode(name); + app.graph.add(node); + + node.pos = [ + options.side === "left" ? 
nextTo.pos[0] - (node.size[0] + options.offset): nextTo.pos[0] + nextTo.size[0] + options.offset, + + nextTo.pos[1] + options.shiftY, + ]; + if (options.select) { + app.canvas.selectNode(node, false); + } + return node; +} + +app.registerExtension({ + name: "KJNodesContextmenu", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.input && nodeData.input.required) { + addMenuHandler(nodeType, function (_, options) { + options.unshift( + { + content: "Add GetNode", + callback: () => {addNode("GetNode", this, { side:"left", offset: 30});} + }, + { + content: "Add SetNode", + callback: () => {addNode("SetNode", this, { side:"right", offset: 30 }); + }, + }); + }); + } + }, + async setup(app) { + const updateSlots = (value) => { + const valuesToAddToIn = ["GetNode"]; + const valuesToAddToOut = ["SetNode"]; + // Remove entries if they exist + for (const arr of Object.values(LiteGraph.slot_types_default_in)) { + for (const valueToAdd of valuesToAddToIn) { + const idx = arr.indexOf(valueToAdd); + if (idx !== -1) { + arr.splice(idx, 1); + } + } + } + + for (const arr of Object.values(LiteGraph.slot_types_default_out)) { + for (const valueToAdd of valuesToAddToOut) { + const idx = arr.indexOf(valueToAdd); + if (idx !== -1) { + arr.splice(idx, 1); + } + } + } + if (value!="disabled") { + for (const arr of Object.values(LiteGraph.slot_types_default_in)) { + for (const valueToAdd of valuesToAddToIn) { + const idx = arr.indexOf(valueToAdd); + if (idx !== -1) { + arr.splice(idx, 1); + } + if (value === "top") { + arr.unshift(valueToAdd); + } else { + arr.push(valueToAdd); + } + } + } + + for (const arr of Object.values(LiteGraph.slot_types_default_out)) { + for (const valueToAdd of valuesToAddToOut) { + const idx = arr.indexOf(valueToAdd); + if (idx !== -1) { + arr.splice(idx, 1); + } + if (value === "top") { + arr.unshift(valueToAdd); + } else { + arr.push(valueToAdd); + } + } + } + } + }; + + app.ui.settings.addSetting({ + id: "KJNodes.SetGetMenu", + name: "KJNodes: Make Set/Get -nodes defaults", + tooltip: 'Adds Set/Get nodes to the top or bottom of the list of available node suggestions.', + options: ['disabled', 'top', 'bottom'], + defaultValue: 'disabled', + type: "combo", + onChange: updateSlots, + + }); + app.ui.settings.addSetting({ + id: "KJNodes.MiddleClickDefault", + name: "KJNodes: Middle click default node adding", + defaultValue: false, + type: "boolean", + onChange: (value) => { + LiteGraph.middle_click_slot_add_default_node = value; + }, + }); + app.ui.settings.addSetting({ + id: "KJNodes.nodeAutoColor", + name: "KJNodes: Automatically set node colors", + type: "boolean", + defaultValue: true, + }); + app.ui.settings.addSetting({ + id: "KJNodes.helpPopup", + name: "KJNodes: Help popups", + defaultValue: true, + type: "boolean", + }); + app.ui.settings.addSetting({ + id: "KJNodes.disablePrefix", + name: "KJNodes: Disable automatic Set_ and Get_ prefix", + defaultValue: true, + type: "boolean", + }); + app.ui.settings.addSetting({ + id: "KJNodes.browserStatus", + name: "KJNodes: 🟢 Stoplight browser status icon 🔴", + defaultValue: false, + type: "boolean", + }); +} +}); diff --git a/custom_nodes/comfyui-kjnodes/web/js/fast_preview.js b/custom_nodes/comfyui-kjnodes/web/js/fast_preview.js new file mode 100644 index 0000000000000000000000000000000000000000..822c1f745ba4e895364664f7dbed3d225f0430b2 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/web/js/fast_preview.js @@ -0,0 +1,95 @@ +import { app } from '../../../scripts/app.js' + +//from melmass +export function 
makeUUID() { + let dt = new Date().getTime() + const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { + const r = ((dt + Math.random() * 16) % 16) | 0 + dt = Math.floor(dt / 16) + return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16) + }) + return uuid +} + +function chainCallback(object, property, callback) { + if (object == undefined) { + //This should not happen. + console.error("Tried to add callback to non-existant object") + return; + } + if (property in object) { + const callback_orig = object[property] + object[property] = function () { + const r = callback_orig.apply(this, arguments); + callback.apply(this, arguments); + return r + }; + } else { + object[property] = callback; + } +} +app.registerExtension({ + name: 'KJNodes.FastPreview', + + async beforeRegisterNodeDef(nodeType, nodeData) { + if (nodeData?.name === 'FastPreview') { + chainCallback(nodeType.prototype, "onNodeCreated", function () { + + var element = document.createElement("div"); + this.uuid = makeUUID() + element.id = `fast-preview-${this.uuid}` + + this.previewWidget = this.addDOMWidget(nodeData.name, "FastPreviewWidget", element, { + serialize: false, + hideOnZoom: false, + }); + + this.previewer = new Previewer(this); + + this.setSize([550, 550]); + this.resizable = false; + this.previewWidget.parentEl = document.createElement("div"); + this.previewWidget.parentEl.className = "fast-preview"; + this.previewWidget.parentEl.id = `fast-preview-${this.uuid}` + element.appendChild(this.previewWidget.parentEl); + + chainCallback(this, "onExecuted", function (message) { + let bg_image = message["bg_image"]; + this.properties.imgData = { + name: "bg_image", + base64: bg_image + }; + this.previewer.refreshBackgroundImage(this); + }); + + + }); // onAfterGraphConfigured + }//node created + } //before register +})//register + +class Previewer { + constructor(context) { + this.node = context; + this.previousWidth = null; + this.previousHeight = null; + } + refreshBackgroundImage = () => { + const imgData = this.node?.properties?.imgData; + if (imgData?.base64) { + const base64String = imgData.base64; + const imageUrl = `data:${imgData.type};base64,${base64String}`; + const img = new Image(); + img.src = imageUrl; + img.onload = () => { + const { width, height } = img; + if (width !== this.previousWidth || height !== this.previousHeight) { + this.node.setSize([width, height]); + this.previousWidth = width; + this.previousHeight = height; + } + this.node.previewWidget.element.style.backgroundImage = `url(${imageUrl})`; + }; + } + }; + } \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/web/js/help_popup.js b/custom_nodes/comfyui-kjnodes/web/js/help_popup.js new file mode 100644 index 0000000000000000000000000000000000000000..c4734befd7726d6190b246e66140e99dfb9f7e65 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/web/js/help_popup.js @@ -0,0 +1,326 @@ +import { app } from "../../../scripts/app.js"; + +// code based on mtb nodes by Mel Massadian https://github.com/melMass/comfy_mtb/ +export const loadScript = ( + FILE_URL, + async = true, + type = 'text/javascript', +) => { + return new Promise((resolve, reject) => { + try { + // Check if the script already exists + const existingScript = document.querySelector(`script[src="${FILE_URL}"]`) + if (existingScript) { + resolve({ status: true, message: 'Script already loaded' }) + return + } + + const scriptEle = document.createElement('script') + scriptEle.type = type + scriptEle.async = async + scriptEle.src = FILE_URL + + 
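+      // Resolve the promise once the script has loaded; reject on error so callers can log and continue.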
scriptEle.addEventListener('load', (ev) => { + resolve({ status: true }) + }) + + scriptEle.addEventListener('error', (ev) => { + reject({ + status: false, + message: `Failed to load the script ${FILE_URL}`, + }) + }) + + document.body.appendChild(scriptEle) + } catch (error) { + reject(error) + } + }) +} + +loadScript('kjweb_async/marked.min.js').catch((e) => { + console.log(e) +}) +loadScript('kjweb_async/purify.min.js').catch((e) => { + console.log(e) +}) + +const categories = ["KJNodes", "SUPIR", "VoiceCraft", "Marigold", "IC-Light", "WanVideoWrapper"]; +app.registerExtension({ + name: "KJNodes.HelpPopup", + async beforeRegisterNodeDef(nodeType, nodeData) { + + if (app.ui.settings.getSettingValue("KJNodes.helpPopup") === false) { + return; + } + try { + categories.forEach(category => { + if (nodeData?.category?.startsWith(category)) { + addDocumentation(nodeData, nodeType); + } + else return + }); + } catch (error) { + console.error("Error in registering KJNodes.HelpPopup", error); + } + }, +}); + +const create_documentation_stylesheet = () => { + const tag = 'kj-documentation-stylesheet' + + let styleTag = document.head.querySelector(tag) + + if (!styleTag) { + styleTag = document.createElement('style') + styleTag.type = 'text/css' + styleTag.id = tag + styleTag.innerHTML = ` + .kj-documentation-popup { + background: var(--comfy-menu-bg); + position: absolute; + color: var(--fg-color); + font: 12px monospace; + line-height: 1.5em; + padding: 10px; + border-radius: 10px; + border-style: solid; + border-width: medium; + border-color: var(--border-color); + z-index: 5; + overflow: hidden; + } + .content-wrapper { + overflow: auto; + max-height: 100%; + /* Scrollbar styling for Chrome */ + &::-webkit-scrollbar { + width: 6px; + } + &::-webkit-scrollbar-track { + background: var(--bg-color); + } + &::-webkit-scrollbar-thumb { + background-color: var(--fg-color); + border-radius: 6px; + border: 3px solid var(--bg-color); + } + + /* Scrollbar styling for Firefox */ + scrollbar-width: thin; + scrollbar-color: var(--fg-color) var(--bg-color); + a { + color: yellow; + } + a:visited { + color: orange; + } + a:hover { + color: red; + } + } + ` + document.head.appendChild(styleTag) + } + } + + /** Add documentation widget to the selected node */ + export const addDocumentation = ( + nodeData, + nodeType, + opts = { icon_size: 14, icon_margin: 4 },) => { + + opts = opts || {} + const iconSize = opts.icon_size ? opts.icon_size : 14 + const iconMargin = opts.icon_margin ? opts.icon_margin : 4 + let docElement = null + let contentWrapper = null + //if no description in the node python code, don't do anything + if (!nodeData.description) { + return + } + + const drawFg = nodeType.prototype.onDrawForeground + nodeType.prototype.onDrawForeground = function (ctx) { + const r = drawFg ? 
drawFg.apply(this, arguments) : undefined + if (this.flags.collapsed) return r + + // icon position + const x = this.size[0] - iconSize - iconMargin + + // create the popup + if (this.show_doc && docElement === null) { + docElement = document.createElement('div') + contentWrapper = document.createElement('div'); + docElement.appendChild(contentWrapper); + + create_documentation_stylesheet() + contentWrapper.classList.add('content-wrapper'); + docElement.classList.add('kj-documentation-popup') + + //parse the string from the python node code to html with marked, and sanitize the html with DOMPurify + contentWrapper.innerHTML = DOMPurify.sanitize(marked.parse(nodeData.description,)) + + // resize handle + const resizeHandle = document.createElement('div'); + resizeHandle.style.width = '0'; + resizeHandle.style.height = '0'; + resizeHandle.style.position = 'absolute'; + resizeHandle.style.bottom = '0'; + resizeHandle.style.right = '0'; + resizeHandle.style.cursor = 'se-resize'; + + // Add pseudo-elements to create a triangle shape + const borderColor = getComputedStyle(document.documentElement).getPropertyValue('--border-color').trim(); + resizeHandle.style.borderTop = '10px solid transparent'; + resizeHandle.style.borderLeft = '10px solid transparent'; + resizeHandle.style.borderBottom = `10px solid ${borderColor}`; + resizeHandle.style.borderRight = `10px solid ${borderColor}`; + + docElement.appendChild(resizeHandle) + let isResizing = false + let startX, startY, startWidth, startHeight + + resizeHandle.addEventListener('mousedown', function (e) { + e.preventDefault(); + e.stopPropagation(); + isResizing = true; + startX = e.clientX; + startY = e.clientY; + startWidth = parseInt(document.defaultView.getComputedStyle(docElement).width, 10); + startHeight = parseInt(document.defaultView.getComputedStyle(docElement).height, 10); + }, + { signal: this.docCtrl.signal }, + ); + + // close button + const closeButton = document.createElement('div'); + closeButton.textContent = '❌'; + closeButton.style.position = 'absolute'; + closeButton.style.top = '0'; + closeButton.style.right = '0'; + closeButton.style.cursor = 'pointer'; + closeButton.style.padding = '5px'; + closeButton.style.color = 'red'; + closeButton.style.fontSize = '12px'; + + docElement.appendChild(closeButton) + + closeButton.addEventListener('mousedown', (e) => { + e.stopPropagation(); + this.show_doc = !this.show_doc + docElement.parentNode.removeChild(docElement) + docElement = null + if (contentWrapper) { + contentWrapper.remove() + contentWrapper = null + } + }, + { signal: this.docCtrl.signal }, + ); + + document.addEventListener('mousemove', function (e) { + if (!isResizing) return; + const scale = app.canvas.ds.scale; + const newWidth = startWidth + (e.clientX - startX) / scale; + const newHeight = startHeight + (e.clientY - startY) / scale;; + docElement.style.width = `${newWidth}px`; + docElement.style.height = `${newHeight}px`; + }, + { signal: this.docCtrl.signal }, + ); + + document.addEventListener('mouseup', function () { + isResizing = false + }, + { signal: this.docCtrl.signal }, + ) + + document.body.appendChild(docElement) + } + // close the popup + else if (!this.show_doc && docElement !== null) { + docElement.parentNode.removeChild(docElement) + docElement = null + } + // update position of the popup + if (this.show_doc && docElement !== null) { + const rect = ctx.canvas.getBoundingClientRect() + const scaleX = rect.width / ctx.canvas.width + const scaleY = rect.height / ctx.canvas.height + + const transform = 
new DOMMatrix() + .scaleSelf(scaleX, scaleY) + .multiplySelf(ctx.getTransform()) + .translateSelf(this.size[0] * scaleX * Math.max(1.0,window.devicePixelRatio) , 0) + .translateSelf(10, -32) + + const scale = new DOMMatrix() + .scaleSelf(transform.a, transform.d); + const bcr = app.canvas.canvas.getBoundingClientRect() + + const styleObject = { + transformOrigin: '0 0', + transform: scale, + left: `${transform.a + bcr.x + transform.e}px`, + top: `${transform.d + bcr.y + transform.f}px`, + }; + Object.assign(docElement.style, styleObject); + } + + ctx.save() + ctx.translate(x - 2, iconSize - 34) + ctx.scale(iconSize / 32, iconSize / 32) + ctx.strokeStyle = 'rgba(255,255,255,0.3)' + ctx.lineCap = 'round' + ctx.lineJoin = 'round' + ctx.lineWidth = 2.4 + ctx.font = 'bold 36px monospace' + ctx.fillStyle = 'orange'; + ctx.fillText('?', 0, 24) + ctx.restore() + return r + } + // handle clicking of the icon + const mouseDown = nodeType.prototype.onMouseDown + nodeType.prototype.onMouseDown = function (e, localPos, canvas) { + const r = mouseDown ? mouseDown.apply(this, arguments) : undefined + const iconX = this.size[0] - iconSize - iconMargin + const iconY = iconSize - 34 + if ( + localPos[0] > iconX && + localPos[0] < iconX + iconSize && + localPos[1] > iconY && + localPos[1] < iconY + iconSize + ) { + if (this.show_doc === undefined) { + this.show_doc = true + } else { + this.show_doc = !this.show_doc + } + if (this.show_doc) { + this.docCtrl = new AbortController() + } else { + this.docCtrl.abort() + } + return true; + } + return r; + } + const onRem = nodeType.prototype.onRemoved + + nodeType.prototype.onRemoved = function () { + const r = onRem ? onRem.apply(this, []) : undefined + + if (docElement) { + docElement.remove() + docElement = null + } + + if (contentWrapper) { + contentWrapper.remove() + contentWrapper = null + } + return r + } +} \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/web/js/jsnodes.js b/custom_nodes/comfyui-kjnodes/web/js/jsnodes.js new file mode 100644 index 0000000000000000000000000000000000000000..2e676935b83054b693fb6394b54e33a22ed351c2 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/web/js/jsnodes.js @@ -0,0 +1,413 @@ +import { app } from "../../../scripts/app.js"; +import { applyTextReplacements } from "../../../scripts/utils.js"; + +app.registerExtension({ + name: "KJNodes.jsnodes", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if(!nodeData?.category?.startsWith("KJNodes")) { + return; + } + switch (nodeData.name) { + case "ConditioningMultiCombine": + nodeType.prototype.onNodeCreated = function () { + this._type = "CONDITIONING" + this.inputs_offset = nodeData.name.includes("selective")?1:0 + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + const num_inputs = this.inputs.filter(input => input.type === this._type).length + if(target_number_of_inputs===num_inputs)return; // already set, do nothing + + if(target_number_of_inputs < num_inputs){ + const inputs_to_remove = num_inputs - target_number_of_inputs; + for(let i = 0; i < inputs_to_remove; i++) { + this.removeInput(this.inputs.length - 1); + } + } + else{ + for(let i = num_inputs+1; i <= target_number_of_inputs; ++i) + this.addInput(`conditioning_${i}`, this._type) + } + }); + } + break; + case "ImageBatchMulti": + case "ImageAddMulti": + case "ImageConcatMulti": + case "CrossFadeImagesMulti": + case "TransitionImagesMulti": + 
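+        // All of the image-list nodes above share the handler below: it reads the "inputcount"
+        // widget and adds or removes IMAGE inputs to match it.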
nodeType.prototype.onNodeCreated = function () { + this._type = "IMAGE" + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + const num_inputs = this.inputs.filter(input => input.type === this._type).length + if(target_number_of_inputs===num_inputs)return; // already set, do nothing + + if(target_number_of_inputs < num_inputs){ + const inputs_to_remove = num_inputs - target_number_of_inputs; + for(let i = 0; i < inputs_to_remove; i++) { + this.removeInput(this.inputs.length - 1); + } + } + else{ + for(let i = num_inputs+1; i <= target_number_of_inputs; ++i) + this.addInput(`image_${i}`, this._type, {shape: 7}); + } + + }); + } + break; + case "MaskBatchMulti": + nodeType.prototype.onNodeCreated = function () { + this._type = "MASK" + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + const num_inputs = this.inputs.filter(input => input.type === this._type).length + if(target_number_of_inputs===num_inputs)return; // already set, do nothing + + if(target_number_of_inputs < num_inputs){ + const inputs_to_remove = num_inputs - target_number_of_inputs; + for(let i = 0; i < inputs_to_remove; i++) { + this.removeInput(this.inputs.length - 1); + } + } + else{ + for(let i = num_inputs+1; i <= target_number_of_inputs; ++i) + this.addInput(`mask_${i}`, this._type) + } + }); + } + break; + + case "FluxBlockLoraSelect": + case "HunyuanVideoBlockLoraSelect": + case "Wan21BlockLoraSelect": + nodeType.prototype.onNodeCreated = function () { + this.addWidget("button", "Set all", null, () => { + const userInput = prompt("Enter the values to set for widgets (e.g., s0,1,2-7=2.0, d0,1,2-7=2.0, or 1.0):", ""); + if (userInput) { + const regex = /([sd])?(\d+(?:,\d+|-?\d+)*?)?=(\d+(\.\d+)?)/; + const match = userInput.match(regex); + if (match) { + const type = match[1]; + const indicesPart = match[2]; + const value = parseFloat(match[3]); + + let targetWidgets = []; + if (type === 's') { + targetWidgets = this.widgets.filter(widget => widget.name.includes("single")); + } else if (type === 'd') { + targetWidgets = this.widgets.filter(widget => widget.name.includes("double")); + } else { + targetWidgets = this.widgets; // No type specified, all widgets + } + + if (indicesPart) { + const indices = indicesPart.split(',').flatMap(part => { + if (part.includes('-')) { + const [start, end] = part.split('-').map(Number); + return Array.from({ length: end - start + 1 }, (_, i) => start + i); + } + return Number(part); + }); + + for (const index of indices) { + if (index < targetWidgets.length) { + targetWidgets[index].value = value; + } + } + } else { + // No indices provided, set value for all target widgets + for (const widget of targetWidgets) { + widget.value = value; + } + } + } else if (!isNaN(parseFloat(userInput))) { + // Single value provided, set it for all widgets + const value = parseFloat(userInput); + for (const widget of this.widgets) { + widget.value = value; + } + } else { + alert("Invalid input format. Please use the format s0,1,2-7=2.0, d0,1,2-7=2.0, or 1.0"); + } + } else { + alert("Invalid input. 
Please enter a value."); + } + }); + }; + break; + + case "GetMaskSizeAndCount": + const onGetMaskSizeConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + const v = onGetMaskSizeConnectInput? onGetMaskSizeConnectInput.apply(this, arguments): undefined + this.outputs[1]["label"] = "width" + this.outputs[2]["label"] = "height" + this.outputs[3]["label"] = "count" + return v; + } + const onGetMaskSizeExecuted = nodeType.prototype.onAfterExecuteNode; + nodeType.prototype.onExecuted = function(message) { + const r = onGetMaskSizeExecuted? onGetMaskSizeExecuted.apply(this,arguments): undefined + let values = message["text"].toString().split('x').map(Number); + this.outputs[1]["label"] = values[1] + " width" + this.outputs[2]["label"] = values[2] + " height" + this.outputs[3]["label"] = values[0] + " count" + return r + } + break; + + case "GetImageSizeAndCount": + const onGetImageSizeConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + console.log(this) + const v = onGetImageSizeConnectInput? onGetImageSizeConnectInput.apply(this, arguments): undefined + //console.log(this) + this.outputs[1]["label"] = "width" + this.outputs[2]["label"] = "height" + this.outputs[3]["label"] = "count" + return v; + } + //const onGetImageSizeExecuted = nodeType.prototype.onExecuted; + const onGetImageSizeExecuted = nodeType.prototype.onAfterExecuteNode; + nodeType.prototype.onExecuted = function(message) { + console.log(this) + const r = onGetImageSizeExecuted? onGetImageSizeExecuted.apply(this,arguments): undefined + let values = message["text"].toString().split('x').map(Number); + console.log(values) + this.outputs[1]["label"] = values[1] + " width" + this.outputs[2]["label"] = values[2] + " height" + this.outputs[3]["label"] = values[0] + " count" + return r + } + break; + + case "GetLatentSizeAndCount": + const onGetLatentConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + console.log(this) + const v = onGetLatentConnectInput? onGetLatentConnectInput.apply(this, arguments): undefined + //console.log(this) + this.outputs[1]["label"] = "width" + this.outputs[2]["label"] = "height" + this.outputs[3]["label"] = "count" + return v; + } + //const onGetImageSizeExecuted = nodeType.prototype.onExecuted; + const onGetLatentSizeExecuted = nodeType.prototype.onAfterExecuteNode; + nodeType.prototype.onExecuted = function(message) { + console.log(this) + const r = onGetLatentSizeExecuted? onGetLatentSizeExecuted.apply(this,arguments): undefined + let values = message["text"].toString().split('x').map(Number); + console.log(values) + this.outputs[1]["label"] = values[0] + " batch" + this.outputs[2]["label"] = values[1] + " channels" + this.outputs[3]["label"] = values[2] + " frames" + this.outputs[4]["label"] = values[3] + " height" + this.outputs[5]["label"] = values[4] + " width" + return r + } + break; + + case "PreviewAnimation": + const onPreviewAnimationConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + const v = onPreviewAnimationConnectInput? 
onPreviewAnimationConnectInput.apply(this, arguments): undefined + this.title = "Preview Animation" + return v; + } + const onPreviewAnimationExecuted = nodeType.prototype.onAfterExecuteNode; + nodeType.prototype.onExecuted = function(message) { + const r = onPreviewAnimationExecuted? onPreviewAnimationExecuted.apply(this,arguments): undefined + let values = message["text"].toString(); + this.title = "Preview Animation " + values + return r + } + break; + + case "VRAM_Debug": + const onVRAM_DebugConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + const v = onVRAM_DebugConnectInput? onVRAM_DebugConnectInput.apply(this, arguments): undefined + this.outputs[3]["label"] = "freemem_before" + this.outputs[4]["label"] = "freemem_after" + return v; + } + const onVRAM_DebugExecuted = nodeType.prototype.onAfterExecuteNode; + nodeType.prototype.onExecuted = function(message) { + const r = onVRAM_DebugExecuted? onVRAM_DebugExecuted.apply(this,arguments): undefined + let values = message["text"].toString().split('x'); + this.outputs[3]["label"] = values[0] + " freemem_before" + this.outputs[4]["label"] = values[1] + " freemem_after" + return r + } + break; + + case "JoinStringMulti": + const originalOnNodeCreated = nodeType.prototype.onNodeCreated || function() {}; + nodeType.prototype.onNodeCreated = function () { + originalOnNodeCreated.apply(this, arguments); + + this._type = "STRING"; + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + const num_inputs = this.inputs.filter(input => input.name && input.name.toLowerCase().includes("string_")).length + if (target_number_of_inputs === num_inputs) return; // already set, do nothing + + if(target_number_of_inputs < num_inputs){ + const inputs_to_remove = num_inputs - target_number_of_inputs; + for(let i = 0; i < inputs_to_remove; i++) { + this.removeInput(this.inputs.length - 1); + } + } + else{ + for(let i = num_inputs+1; i <= target_number_of_inputs; ++i) + this.addInput(`string_${i}`, this._type, {shape: 7}); + } + }); + } + break; + case "SoundReactive": + nodeType.prototype.onNodeCreated = function () { + let audioContext; + let microphoneStream; + let animationFrameId; + let analyser; + let dataArray; + let startRangeHz; + let endRangeHz; + let smoothingFactor = 0.5; + let smoothedSoundLevel = 0; + + // Function to update the widget value in real-time + const updateWidgetValueInRealTime = () => { + // Ensure analyser and dataArray are defined before using them + if (analyser && dataArray) { + analyser.getByteFrequencyData(dataArray); + + const startRangeHzWidget = this.widgets.find(w => w.name === "start_range_hz"); + if (startRangeHzWidget) startRangeHz = startRangeHzWidget.value; + const endRangeHzWidget = this.widgets.find(w => w.name === "end_range_hz"); + if (endRangeHzWidget) endRangeHz = endRangeHzWidget.value; + const smoothingFactorWidget = this.widgets.find(w => w.name === "smoothing_factor"); + if (smoothingFactorWidget) smoothingFactor = smoothingFactorWidget.value; + + // Calculate frequency bin width (frequency resolution) + const frequencyBinWidth = audioContext.sampleRate / analyser.fftSize; + // Convert the widget values from Hz to indices + const startRangeIndex = Math.floor(startRangeHz / frequencyBinWidth); + const endRangeIndex = Math.floor(endRangeHz / frequencyBinWidth); + + // 
Function to calculate the average value for a frequency range + const calculateAverage = (start, end) => { + const sum = dataArray.slice(start, end).reduce((acc, val) => acc + val, 0); + const average = sum / (end - start); + + // Apply exponential moving average smoothing + smoothedSoundLevel = (average * (1 - smoothingFactor)) + (smoothedSoundLevel * smoothingFactor); + return smoothedSoundLevel; + }; + // Calculate the average levels for each frequency range + const soundLevel = calculateAverage(startRangeIndex, endRangeIndex); + + // Update the widget values + + const lowLevelWidget = this.widgets.find(w => w.name === "sound_level"); + if (lowLevelWidget) lowLevelWidget.value = soundLevel; + + animationFrameId = requestAnimationFrame(updateWidgetValueInRealTime); + } + }; + + // Function to start capturing audio from the microphone + const startMicrophoneCapture = () => { + // Only create the audio context and analyser once + if (!audioContext) { + audioContext = new (window.AudioContext || window.webkitAudioContext)(); + // Access the sample rate of the audio context + console.log(`Sample rate: ${audioContext.sampleRate}Hz`); + analyser = audioContext.createAnalyser(); + analyser.fftSize = 2048; + dataArray = new Uint8Array(analyser.frequencyBinCount); + // Get the range values from widgets (assumed to be in Hz) + const lowRangeWidget = this.widgets.find(w => w.name === "low_range_hz"); + if (lowRangeWidget) startRangeHz = lowRangeWidget.value; + + const midRangeWidget = this.widgets.find(w => w.name === "mid_range_hz"); + if (midRangeWidget) endRangeHz = midRangeWidget.value; + } + + navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => { + microphoneStream = stream; + const microphone = audioContext.createMediaStreamSource(stream); + microphone.connect(analyser); + updateWidgetValueInRealTime(); + }).catch(error => { + console.error('Access to microphone was denied or an error occurred:', error); + }); + }; + + // Function to stop capturing audio from the microphone + const stopMicrophoneCapture = () => { + if (animationFrameId) { + cancelAnimationFrame(animationFrameId); + } + if (microphoneStream) { + microphoneStream.getTracks().forEach(track => track.stop()); + } + if (audioContext) { + audioContext.close(); + // Reset audioContext to ensure it can be created again when starting + audioContext = null; + } + }; + + // Add start button + this.addWidget("button", "Start mic capture", null, startMicrophoneCapture); + + // Add stop button + this.addWidget("button", "Stop mic capture", null, stopMicrophoneCapture); + }; + break; + case "SaveImageKJ": + const onNodeCreated = nodeType.prototype.onNodeCreated; + nodeType.prototype.onNodeCreated = function() { + const r = onNodeCreated ? 
onNodeCreated.apply(this, arguments) : void 0; + const widget = this.widgets.find((w) => w.name === "filename_prefix"); + widget.serializeValue = () => { + return applyTextReplacements(app, widget.value); + }; + return r; + }; + break; + + } + + }, + async setup() { + // to keep Set/Get node virtual connections visible when offscreen + const originalComputeVisibleNodes = LGraphCanvas.prototype.computeVisibleNodes; + LGraphCanvas.prototype.computeVisibleNodes = function () { + const visibleNodesSet = new Set(originalComputeVisibleNodes.apply(this, arguments)); + for (const node of this.graph._nodes) { + if ((node.type === "SetNode" || node.type === "GetNode") && node.drawConnection) { + visibleNodesSet.add(node); + } + } + return Array.from(visibleNodesSet); + }; + + } +}); \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/web/js/point_editor.js b/custom_nodes/comfyui-kjnodes/web/js/point_editor.js new file mode 100644 index 0000000000000000000000000000000000000000..6baa10830d5cba754eaa440685fdabc0c9d9f8e8 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/web/js/point_editor.js @@ -0,0 +1,734 @@ +import { app } from '../../../scripts/app.js' + +//from melmass +export function makeUUID() { + let dt = new Date().getTime() + const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { + const r = ((dt + Math.random() * 16) % 16) | 0 + dt = Math.floor(dt / 16) + return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16) + }) + return uuid +} + +export const loadScript = ( + FILE_URL, + async = true, + type = 'text/javascript', +) => { + return new Promise((resolve, reject) => { + try { + // Check if the script already exists + const existingScript = document.querySelector(`script[src="${FILE_URL}"]`) + if (existingScript) { + resolve({ status: true, message: 'Script already loaded' }) + return + } + + const scriptEle = document.createElement('script') + scriptEle.type = type + scriptEle.async = async + scriptEle.src = FILE_URL + + scriptEle.addEventListener('load', (ev) => { + resolve({ status: true }) + }) + + scriptEle.addEventListener('error', (ev) => { + reject({ + status: false, + message: `Failed to load the script ${FILE_URL}`, + }) + }) + + document.body.appendChild(scriptEle) + } catch (error) { + reject(error) + } + }) +} +const create_documentation_stylesheet = () => { + const tag = 'kj-pointseditor-stylesheet' + + let styleTag = document.head.querySelector(tag) + + if (!styleTag) { + styleTag = document.createElement('style') + styleTag.type = 'text/css' + styleTag.id = tag + styleTag.innerHTML = ` + .points-editor { + + position: absolute; + + font: 12px monospace; + line-height: 1.5em; + padding: 10px; + z-index: 0; + overflow: hidden; + } + ` + document.head.appendChild(styleTag) + } +} + +loadScript('kjweb_async/svg-path-properties.min.js').catch((e) => { + console.log(e) +}) +loadScript('kjweb_async/protovis.min.js').catch((e) => { + console.log(e) +}) +create_documentation_stylesheet() + +function chainCallback(object, property, callback) { + if (object == undefined) { + //This should not happen. 
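+      // Log and return instead of throwing when the target object is missing.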
+ console.error("Tried to add callback to non-existant object") + return; + } + if (property in object) { + const callback_orig = object[property] + object[property] = function () { + const r = callback_orig.apply(this, arguments); + callback.apply(this, arguments); + return r + }; + } else { + object[property] = callback; + } +} +app.registerExtension({ + name: 'KJNodes.PointEditor', + + async beforeRegisterNodeDef(nodeType, nodeData) { + if (nodeData?.name === 'PointsEditor') { + chainCallback(nodeType.prototype, "onNodeCreated", function () { + + hideWidgetForGood(this, this.widgets.find(w => w.name === "coordinates")) + hideWidgetForGood(this, this.widgets.find(w => w.name === "neg_coordinates")) + hideWidgetForGood(this, this.widgets.find(w => w.name === "bboxes")) + + var element = document.createElement("div"); + this.uuid = makeUUID() + element.id = `points-editor-${this.uuid}` + + this.previewMediaType = 'image' + + this.pointsEditor = this.addDOMWidget(nodeData.name, "PointsEditorWidget", element, { + serialize: false, + hideOnZoom: false, + }); + + // context menu + this.contextMenu = document.createElement("div"); + this.contextMenu.id = "context-menu"; + this.contextMenu.style.display = "none"; + this.contextMenu.style.position = "absolute"; + this.contextMenu.style.backgroundColor = "#202020"; + this.contextMenu.style.minWidth = "100px"; + this.contextMenu.style.boxShadow = "0px 8px 16px 0px rgba(0,0,0,0.2)"; + this.contextMenu.style.zIndex = "100"; + this.contextMenu.style.padding = "5px"; + + function styleMenuItem(menuItem) { + menuItem.style.display = "block"; + menuItem.style.padding = "5px"; + menuItem.style.color = "#FFF"; + menuItem.style.fontFamily = "Arial, sans-serif"; + menuItem.style.fontSize = "16px"; + menuItem.style.textDecoration = "none"; + menuItem.style.marginBottom = "5px"; + } + function createMenuItem(id, textContent) { + let menuItem = document.createElement("a"); + menuItem.href = "#"; + menuItem.id = `menu-item-${id}`; + menuItem.textContent = textContent; + styleMenuItem(menuItem); + return menuItem; + } + + // Create an array of menu items using the createMenuItem function + this.menuItems = [ + createMenuItem(0, "Load Image"), + createMenuItem(1, "Clear Image"), + ]; + + // Add mouseover and mouseout event listeners to each menu item for styling + this.menuItems.forEach(menuItem => { + menuItem.addEventListener('mouseover', function () { + this.style.backgroundColor = "gray"; + }); + + menuItem.addEventListener('mouseout', function () { + this.style.backgroundColor = "#202020"; + }); + }); + + // Append each menu item to the context menu + this.menuItems.forEach(menuItem => { + this.contextMenu.appendChild(menuItem); + }); + + document.body.appendChild(this.contextMenu); + + this.addWidget("button", "New canvas", null, () => { + if (!this.properties || !("points" in this.properties)) { + this.editor = new PointsEditor(this); + this.addProperty("points", this.constructor.type, "string"); + this.addProperty("neg_points", this.constructor.type, "string"); + + } + else { + this.editor = new PointsEditor(this, true); + } + }); + + this.setSize([550, 550]); + this.resizable = false; + this.pointsEditor.parentEl = document.createElement("div"); + this.pointsEditor.parentEl.className = "points-editor"; + this.pointsEditor.parentEl.id = `points-editor-${this.uuid}` + element.appendChild(this.pointsEditor.parentEl); + + chainCallback(this, "onConfigure", function () { + try { + this.editor = new PointsEditor(this); + } catch (error) { + console.error("An 
error occurred while configuring the editor:", error); + } + }); + chainCallback(this, "onExecuted", function (message) { + let bg_image = message["bg_image"]; + this.properties.imgData = { + name: "bg_image", + base64: bg_image + }; + this.editor.refreshBackgroundImage(this); + }); + + }); // onAfterGraphConfigured + }//node created + } //before register +})//register + +class PointsEditor { + constructor(context, reset = false) { + this.node = context; + this.reset = reset; + const self = this; // Keep a reference to the main class context + + console.log("creatingPointEditor") + + this.node.pasteFile = (file) => { + if (file.type.startsWith("image/")) { + this.handleImageFile(file); + return true; + } + return false; + }; + + this.node.onDragOver = function (e) { + if (e.dataTransfer && e.dataTransfer.items) { + return [...e.dataTransfer.items].some(f => f.kind === "file" && f.type.startsWith("image/")); + } + return false; + }; + + // On drop upload files + this.node.onDragDrop = (e) => { + console.log("onDragDrop called"); + let handled = false; + for (const file of e.dataTransfer.files) { + if (file.type.startsWith("image/")) { + this.handleImageFile(file); + handled = true; + } + } + return handled; + }; + + // context menu + this.createContextMenu(); + + if (reset && context.pointsEditor.element) { + context.pointsEditor.element.innerHTML = ''; // Clear the container + } + this.pos_coordWidget = context.widgets.find(w => w.name === "coordinates"); + this.neg_coordWidget = context.widgets.find(w => w.name === "neg_coordinates"); + this.pointsStoreWidget = context.widgets.find(w => w.name === "points_store"); + this.widthWidget = context.widgets.find(w => w.name === "width"); + this.heightWidget = context.widgets.find(w => w.name === "height"); + this.bboxStoreWidget = context.widgets.find(w => w.name === "bbox_store"); + this.bboxWidget = context.widgets.find(w => w.name === "bboxes"); + + //widget callbacks + this.widthWidget.callback = () => { + this.width = this.widthWidget.value; + if (this.width > 256) { + context.setSize([this.width + 45, context.size[1]]); + } + this.vis.width(this.width); + this.updateData(); + } + this.heightWidget.callback = () => { + this.height = this.heightWidget.value + this.vis.height(this.height) + context.setSize([context.size[0], this.height + 300]); + this.updateData(); + } + this.pointsStoreWidget.callback = () => { + this.points = JSON.parse(pointsStoreWidget.value).positive; + this.neg_points = JSON.parse(pointsStoreWidget.value).negative; + this.updateData(); + } + this.bboxStoreWidget.callback = () => { + this.bbox = JSON.parse(bboxStoreWidget.value) + this.updateData(); + } + + this.width = this.widthWidget.value; + this.height = this.heightWidget.value; + var i = 3; + this.points = []; + this.neg_points = []; + this.bbox = [{}]; + var drawing = false; + + // Initialize or reset points array + if (!reset && this.pointsStoreWidget.value != "") { + this.points = JSON.parse(this.pointsStoreWidget.value).positive; + this.neg_points = JSON.parse(this.pointsStoreWidget.value).negative; + this.bbox = JSON.parse(this.bboxStoreWidget.value); + console.log(this.bbox) + } else { + this.points = [ + { + x: this.width / 2, // Middle point horizontally centered + y: this.height / 2 // Middle point vertically centered + } + ]; + this.neg_points = [ + { + x: 0, // Middle point horizontally centered + y: 0 // Middle point vertically centered + } + ]; + const combinedPoints = { + positive: this.points, + negative: this.neg_points, + }; + 
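+            // points_store holds the whole editor state as one JSON string of the form
+            //   { "positive": [{ "x": ..., "y": ... }], "negative": [{ "x": ..., "y": ... }] }
+            // which is parsed back into this.points / this.neg_points when the node is reloaded.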
this.pointsStoreWidget.value = JSON.stringify(combinedPoints); + this.bboxStoreWidget.value = JSON.stringify(this.bbox); + } + + //create main canvas panel + this.vis = new pv.Panel() + .width(this.width) + .height(this.height) + .fillStyle("#222") + .strokeStyle("gray") + .lineWidth(2) + .antialias(false) + .margin(10) + .event("mousedown", function () { + if (pv.event.shiftKey && pv.event.button === 2) { // Use pv.event to access the event object + let scaledMouse = { + x: this.mouse().x / app.canvas.ds.scale, + y: this.mouse().y / app.canvas.ds.scale + }; + i = self.neg_points.push(scaledMouse) - 1; + self.updateData(); + return this; + } + else if (pv.event.shiftKey) { + let scaledMouse = { + x: this.mouse().x / app.canvas.ds.scale, + y: this.mouse().y / app.canvas.ds.scale + }; + i = self.points.push(scaledMouse) - 1; + self.updateData(); + return this; + } + else if (pv.event.ctrlKey) { + console.log("start drawing at " + this.mouse().x / app.canvas.ds.scale + ", " + this.mouse().y / app.canvas.ds.scale); + drawing = true; + self.bbox[0].startX = this.mouse().x / app.canvas.ds.scale; + self.bbox[0].startY = this.mouse().y / app.canvas.ds.scale; + } + else if (pv.event.button === 2) { + self.node.contextMenu.style.display = 'block'; + self.node.contextMenu.style.left = `${pv.event.clientX}px`; + self.node.contextMenu.style.top = `${pv.event.clientY}px`; + } + }) + .event("mousemove", function () { + if (drawing) { + self.bbox[0].endX = this.mouse().x / app.canvas.ds.scale; + self.bbox[0].endY = this.mouse().y / app.canvas.ds.scale; + self.vis.render(); + } + }) + .event("mouseup", function () { + console.log("end drawing at " + this.mouse().x / app.canvas.ds.scale + ", " + this.mouse().y / app.canvas.ds.scale); + drawing = false; + self.updateData(); + }); + + this.backgroundImage = this.vis.add(pv.Image).visible(false) + + //create bounding box + this.bounding_box = this.vis.add(pv.Area) + .data(function () { + if (drawing || (self.bbox && self.bbox[0] && Object.keys(self.bbox[0]).length > 0)) { + return [self.bbox[0].startX, self.bbox[0].endX]; + } else { + return []; + } + }) + .bottom(function () {return self.height - Math.max(self.bbox[0].startY, self.bbox[0].endY); }) + .left(function (d) {return d; }) + .height(function () {return Math.abs(self.bbox[0].startY - self.bbox[0].endY);}) + .fillStyle("rgba(70, 130, 180, 0.5)") + .strokeStyle("steelblue") + .visible(function () {return drawing || Object.keys(self.bbox[0]).length > 0; }) + .add(pv.Dot) + .visible(function () {return drawing || Object.keys(self.bbox[0]).length > 0; }) + .data(() => { + if (self.bbox && Object.keys(self.bbox[0]).length > 0) { + return [{ + x: self.bbox[0].endX, + y: self.bbox[0].endY + }]; + } else { + return []; + } + }) + .left(d => d.x) + .top(d => d.y) + .radius(Math.log(Math.min(self.width, self.height)) * 1) + .shape("square") + .cursor("move") + .strokeStyle("steelblue") + .lineWidth(2) + .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; }) + .event("mousedown", pv.Behavior.drag()) + .event("drag", function () { + let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new position by the inverse of the scale factor + let adjustedY = this.mouse().y / app.canvas.ds.scale; + + // Adjust the new position if it would place the dot outside the bounds of the vis.Panel + adjustedX = Math.max(0, Math.min(self.vis.width(), adjustedX)); + adjustedY = Math.max(0, Math.min(self.vis.height(), adjustedY)); + self.bbox[0].endX = this.mouse().x / app.canvas.ds.scale; + self.bbox[0].endY = 
this.mouse().y / app.canvas.ds.scale; + self.vis.render(); + }) + .event("dragend", function () { + self.updateData(); + }); + + //create positive points + this.vis.add(pv.Dot) + .data(() => this.points) + .left(d => d.x) + .top(d => d.y) + .radius(Math.log(Math.min(self.width, self.height)) * 4) + .shape("circle") + .cursor("move") + .strokeStyle(function () { return i == this.index ? "#07f907" : "#139613"; }) + .lineWidth(4) + .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; }) + .event("mousedown", pv.Behavior.drag()) + .event("dragstart", function () { + i = this.index; + }) + .event("dragend", function () { + if (pv.event.button === 2 && i !== 0 && i !== self.points.length - 1) { + this.index = i; + self.points.splice(i--, 1); + } + self.updateData(); + + }) + .event("drag", function () { + let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor + let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor + // Determine the bounds of the vis.Panel + const panelWidth = self.vis.width(); + const panelHeight = self.vis.height(); + + // Adjust the new position if it would place the dot outside the bounds of the vis.Panel + adjustedX = Math.max(0, Math.min(panelWidth, adjustedX)); + adjustedY = Math.max(0, Math.min(panelHeight, adjustedY)); + self.points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position + self.vis.render(); // Re-render the visualization to reflect the new position + }) + + .anchor("center") + .add(pv.Label) + .left(d => d.x < this.width / 2 ? d.x + 30 : d.x - 35) // Shift label to right if on left half, otherwise shift to left + .top(d => d.y < this.height / 2 ? d.y + 25 : d.y - 25) // Shift label down if on top half, otherwise shift up + .font(25 + "px sans-serif") + .text(d => {return this.points.indexOf(d); }) + .textStyle("#139613") + .textShadow("2px 2px 2px black") + .add(pv.Dot) // Add smaller point in the center + .data(() => this.points) + .left(d => d.x) + .top(d => d.y) + .radius(2) // Smaller radius for the center point + .shape("circle") + .fillStyle("red") // Color for the center point + .lineWidth(1); // Stroke thickness for the center point + + //create negative points + this.vis.add(pv.Dot) + .data(() => this.neg_points) + .left(d => d.x) + .top(d => d.y) + .radius(Math.log(Math.min(self.width, self.height)) * 4) + .shape("circle") + .cursor("move") + .strokeStyle(function () { return i == this.index ? 
"#f91111" : "#891616"; }) + .lineWidth(4) + .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; }) + .event("mousedown", pv.Behavior.drag()) + .event("dragstart", function () { + i = this.index; + }) + .event("dragend", function () { + if (pv.event.button === 2 && i !== 0 && i !== self.neg_points.length - 1) { + this.index = i; + self.neg_points.splice(i--, 1); + } + self.updateData(); + + }) + .event("drag", function () { + let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor + let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor + // Determine the bounds of the vis.Panel + const panelWidth = self.vis.width(); + const panelHeight = self.vis.height(); + + // Adjust the new position if it would place the dot outside the bounds of the vis.Panel + adjustedX = Math.max(0, Math.min(panelWidth, adjustedX)); + adjustedY = Math.max(0, Math.min(panelHeight, adjustedY)); + self.neg_points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position + self.vis.render(); // Re-render the visualization to reflect the new position + }) + .anchor("center") + .add(pv.Label) + .left(d => d.x < this.width / 2 ? d.x + 30 : d.x - 35) // Shift label to right if on left half, otherwise shift to left + .top(d => d.y < this.height / 2 ? d.y + 25 : d.y - 25) // Shift label down if on top half, otherwise shift up + .font(25 + "px sans-serif") + .text(d => {return this.neg_points.indexOf(d); }) + .textStyle("red") + .textShadow("2px 2px 2px black") + .add(pv.Dot) // Add smaller point in the center + .data(() => this.neg_points) + .left(d => d.x) + .top(d => d.y) + .radius(2) // Smaller radius for the center point + .shape("circle") + .fillStyle("red") // Color for the center point + .lineWidth(1); // Stroke thickness for the center point + + if (this.points.length != 0) { + this.vis.render(); + } + + var svgElement = this.vis.canvas(); + svgElement.style['zIndex'] = "2" + svgElement.style['position'] = "relative" + this.node.pointsEditor.element.appendChild(svgElement); + + if (this.width > 256) { + this.node.setSize([this.width + 45, this.node.size[1]]); + } + this.node.setSize([this.node.size[0], this.height + 300]); + this.updateData(); + this.refreshBackgroundImage(); + + }//end constructor + + updateData = () => { + if (!this.points || this.points.length === 0) { + console.log("no points"); + return; + } + const combinedPoints = { + positive: this.points, + negative: this.neg_points, + }; + this.pointsStoreWidget.value = JSON.stringify(combinedPoints); + this.pos_coordWidget.value = JSON.stringify(this.points); + this.neg_coordWidget.value = JSON.stringify(this.neg_points); + + if (this.bbox.length != 0) { + let bboxString = JSON.stringify(this.bbox); + this.bboxStoreWidget.value = bboxString; + this.bboxWidget.value = bboxString; + } + + this.vis.render(); + }; + + handleImageLoad = (img, file, base64String) => { + console.log(img.width, img.height); // Access width and height here + this.widthWidget.value = img.width; + this.heightWidget.value = img.height; + + if (img.width != this.vis.width() || img.height != this.vis.height()) { + if (img.width > 256) { + this.node.setSize([img.width + 45, this.node.size[1]]); + } + this.node.setSize([this.node.size[0], img.height + 300]); + this.vis.width(img.width); + this.vis.height(img.height); + this.height = img.height; + this.width = img.width; + this.updateData(); + } + this.backgroundImage.url(file ? 
URL.createObjectURL(file) : `data:${this.node.properties.imgData.type};base64,${base64String}`).visible(true).root.render(); + }; + + processImage = (img, file) => { + const canvas = document.createElement('canvas'); + const ctx = canvas.getContext('2d'); + + const maxWidth = 800; // maximum width + const maxHeight = 600; // maximum height + let width = img.width; + let height = img.height; + + // Calculate the new dimensions while preserving the aspect ratio + if (width > height) { + if (width > maxWidth) { + height *= maxWidth / width; + width = maxWidth; + } + } else { + if (height > maxHeight) { + width *= maxHeight / height; + height = maxHeight; + } + } + + canvas.width = width; + canvas.height = height; + ctx.drawImage(img, 0, 0, width, height); + + // Get the compressed image data as a Base64 string + const base64String = canvas.toDataURL('image/jpeg', 0.5).replace('data:', '').replace(/^.+,/, ''); // 0.5 is the quality from 0 to 1 + + this.node.properties.imgData = { + name: file.name, + lastModified: file.lastModified, + size: file.size, + type: file.type, + base64: base64String + }; + handleImageLoad(img, file, base64String); +}; + + handleImageFile = (file) => { + const reader = new FileReader(); + reader.onloadend = () => { + const img = new Image(); + img.src = reader.result; + img.onload = () => processImage(img, file); + }; + reader.readAsDataURL(file); + + const imageUrl = URL.createObjectURL(file); + const img = new Image(); + img.src = imageUrl; + img.onload = () => this.handleImageLoad(img, file, null); + }; + + refreshBackgroundImage = () => { + if (this.node.properties.imgData && this.node.properties.imgData.base64) { + const base64String = this.node.properties.imgData.base64; + const imageUrl = `data:${this.node.properties.imgData.type};base64,${base64String}`; + const img = new Image(); + img.src = imageUrl; + img.onload = () => this.handleImageLoad(img, null, base64String); + } + }; + + createContextMenu = () => { + self = this; + document.addEventListener('contextmenu', function (e) { + e.preventDefault(); + }); + + document.addEventListener('click', function (e) { + if (!self.node.contextMenu.contains(e.target)) { + self.node.contextMenu.style.display = 'none'; + } + }); + + this.node.menuItems.forEach((menuItem, index) => { + self = this; + menuItem.addEventListener('click', function (e) { + e.preventDefault(); + switch (index) { + case 0: + // Create file input element + const fileInput = document.createElement('input'); + fileInput.type = 'file'; + fileInput.accept = 'image/*'; // Accept only image files + + // Listen for file selection + fileInput.addEventListener('change', function (event) { + const file = event.target.files[0]; // Get the selected file + + if (file) { + const imageUrl = URL.createObjectURL(file); + let img = new Image(); + img.src = imageUrl; + img.onload = () => self.handleImageLoad(img, file, null); + } + }); + + fileInput.click(); + + self.node.contextMenu.style.display = 'none'; + break; + case 1: + self.backgroundImage.visible(false).root.render(); + self.node.properties.imgData = null; + self.node.contextMenu.style.display = 'none'; + break; + } + }); + }); + }//end createContextMenu +}//end class + + +//from melmass +export function hideWidgetForGood(node, widget, suffix = '') { + widget.origType = widget.type + widget.origComputeSize = widget.computeSize + widget.origSerializeValue = widget.serializeValue + widget.computeSize = () => [0, -4] // -4 is due to the gap litegraph adds between widgets automatically + widget.type = 
"converted-widget" + suffix + // widget.serializeValue = () => { + // // Prevent serializing the widget if we have no input linked + // const w = node.inputs?.find((i) => i.widget?.name === widget.name); + // if (w?.link == null) { + // return undefined; + // } + // return widget.origSerializeValue ? widget.origSerializeValue() : widget.value; + // }; + + // Hide any linked widgets, e.g. seed+seedControl + if (widget.linkedWidgets) { + for (const w of widget.linkedWidgets) { + hideWidgetForGood(node, w, ':' + widget.name) + } + } +} \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/web/js/setgetnodes.js b/custom_nodes/comfyui-kjnodes/web/js/setgetnodes.js new file mode 100644 index 0000000000000000000000000000000000000000..c4531881378c51128b1885b212aabc70d2a8c602 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/web/js/setgetnodes.js @@ -0,0 +1,565 @@ +import { app } from "../../../scripts/app.js"; + +//based on diffus3's SetGet: https://github.com/diffus3/ComfyUI-extensions + +// Nodes that allow you to tunnel connections for cleaner graphs +function setColorAndBgColor(type) { + const colorMap = { + "MODEL": LGraphCanvas.node_colors.blue, + "LATENT": LGraphCanvas.node_colors.purple, + "VAE": LGraphCanvas.node_colors.red, + "CONDITIONING": LGraphCanvas.node_colors.brown, + "IMAGE": LGraphCanvas.node_colors.pale_blue, + "CLIP": LGraphCanvas.node_colors.yellow, + "FLOAT": LGraphCanvas.node_colors.green, + "MASK": { color: "#1c5715", bgcolor: "#1f401b"}, + "INT": { color: "#1b4669", bgcolor: "#29699c"}, + "CONTROL_NET": { color: "#156653", bgcolor: "#1c453b"}, + "NOISE": { color: "#2e2e2e", bgcolor: "#242121"}, + "GUIDER": { color: "#3c7878", bgcolor: "#1c453b"}, + "SAMPLER": { color: "#614a4a", bgcolor: "#3b2c2c"}, + "SIGMAS": { color: "#485248", bgcolor: "#272e27"}, + + }; + + const colors = colorMap[type]; + if (colors) { + this.color = colors.color; + this.bgcolor = colors.bgcolor; + } +} +let disablePrefix = app.ui.settings.getSettingValue("KJNodes.disablePrefix") +const LGraphNode = LiteGraph.LGraphNode + +function showAlert(message) { + app.extensionManager.toast.add({ + severity: 'warn', + summary: "KJ Get/Set", + detail: `${message}. Most likely you're missing custom nodes`, + life: 5000, + }) +} +app.registerExtension({ + name: "SetNode", + registerCustomNodes() { + class SetNode extends LGraphNode { + defaultVisibility = true; + serialize_widgets = true; + drawConnection = false; + currentGetters = null; + slotColor = "#FFF"; + canvas = app.canvas; + menuEntry = "Show connections"; + + constructor(title) { + super(title) + if (!this.properties) { + this.properties = { + "previousName": "" + }; + } + this.properties.showOutputText = SetNode.defaultVisibility; + + const node = this; + + this.addWidget( + "text", + "Constant", + '', + (s, t, u, v, x) => { + node.validateName(node.graph); + if(this.widgets[0].value !== ''){ + this.title = (!disablePrefix ? 
"Set_" : "") + this.widgets[0].value; + } + this.update(); + this.properties.previousName = this.widgets[0].value; + }, + {} + ) + + this.addInput("*", "*"); + this.addOutput("*", '*'); + + this.onConnectionsChange = function( + slotType, //1 = input, 2 = output + slot, + isChangeConnect, + link_info, + output + ) { + //On Disconnect + if (slotType == 1 && !isChangeConnect) { + if(this.inputs[slot].name === ''){ + this.inputs[slot].type = '*'; + this.inputs[slot].name = '*'; + this.title = "Set" + } + } + if (slotType == 2 && !isChangeConnect) { + if (this.outputs && this.outputs[slot]) { + this.outputs[slot].type = '*'; + this.outputs[slot].name = '*'; + } + } + //On Connect + if (link_info && node.graph && slotType == 1 && isChangeConnect) { + const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id); + + if (fromNode && fromNode.outputs && fromNode.outputs[link_info.origin_slot]) { + const type = fromNode.outputs[link_info.origin_slot].type; + + if (this.title === "Set"){ + this.title = (!disablePrefix ? "Set_" : "") + type; + } + if (this.widgets[0].value === '*'){ + this.widgets[0].value = type + } + + this.validateName(node.graph); + this.inputs[0].type = type; + this.inputs[0].name = type; + + if (app.ui.settings.getSettingValue("KJNodes.nodeAutoColor")){ + setColorAndBgColor.call(this, type); + } + } else { + showAlert("node input undefined.") + } + } + if (link_info && node.graph && slotType == 2 && isChangeConnect) { + const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id); + + if (fromNode && fromNode.inputs && fromNode.inputs[link_info.origin_slot]) { + const type = fromNode.inputs[link_info.origin_slot].type; + + this.outputs[0].type = type; + this.outputs[0].name = type; + } else { + showAlert('node output undefined'); + } + } + + + //Update either way + this.update(); + } + + this.validateName = function(graph) { + let widgetValue = node.widgets[0].value; + + if (widgetValue !== '') { + let tries = 0; + const existingValues = new Set(); + + graph._nodes.forEach(otherNode => { + if (otherNode !== this && otherNode.type === 'SetNode') { + existingValues.add(otherNode.widgets[0].value); + } + }); + + while (existingValues.has(widgetValue)) { + widgetValue = node.widgets[0].value + "_" + tries; + tries++; + } + + node.widgets[0].value = widgetValue; + this.update(); + } + } + + this.clone = function () { + const cloned = SetNode.prototype.clone.apply(this); + cloned.inputs[0].name = '*'; + cloned.inputs[0].type = '*'; + cloned.value = ''; + cloned.properties.previousName = ''; + cloned.size = cloned.computeSize(); + return cloned; + }; + + this.onAdded = function(graph) { + this.validateName(graph); + } + + + this.update = function() { + if (!node.graph) { + return; + } + + const getters = this.findGetters(node.graph); + getters.forEach(getter => { + getter.setType(this.inputs[0].type); + }); + + if (this.widgets[0].value) { + const gettersWithPreviousName = this.findGetters(node.graph, true); + gettersWithPreviousName.forEach(getter => { + getter.setName(this.widgets[0].value); + }); + } + + const allGetters = node.graph._nodes.filter(otherNode => otherNode.type === "GetNode"); + allGetters.forEach(otherNode => { + if (otherNode.setComboValues) { + otherNode.setComboValues(); + } + }); + } + + + this.findGetters = function(graph, checkForPreviousName) { + const name = checkForPreviousName ? 
this.properties.previousName : this.widgets[0].value; + return graph._nodes.filter(otherNode => otherNode.type === 'GetNode' && otherNode.widgets[0].value === name && name !== ''); + } + + + // This node is purely frontend and does not impact the resulting prompt so should not be serialized + this.isVirtualNode = true; + } + + + onRemoved() { + const allGetters = this.graph._nodes.filter((otherNode) => otherNode.type == "GetNode"); + allGetters.forEach((otherNode) => { + if (otherNode.setComboValues) { + otherNode.setComboValues([this]); + } + }) + } + getExtraMenuOptions(_, options) { + this.menuEntry = this.drawConnection ? "Hide connections" : "Show connections"; + options.unshift( + { + content: this.menuEntry, + callback: () => { + this.currentGetters = this.findGetters(this.graph); + if (this.currentGetters.length == 0) return; + let linkType = (this.currentGetters[0].outputs[0].type); + this.slotColor = this.canvas.default_connection_color_byType[linkType] + this.menuEntry = this.drawConnection ? "Hide connections" : "Show connections"; + this.drawConnection = !this.drawConnection; + this.canvas.setDirty(true, true); + + }, + has_submenu: true, + submenu: { + title: "Color", + options: [ + { + content: "Highlight", + callback: () => { + this.slotColor = "orange" + this.canvas.setDirty(true, true); + } + } + ], + }, + }, + { + content: "Hide all connections", + callback: () => { + const allGetters = this.graph._nodes.filter(otherNode => otherNode.type === "GetNode" || otherNode.type === "SetNode"); + allGetters.forEach(otherNode => { + otherNode.drawConnection = false; + console.log(otherNode); + }); + + this.menuEntry = "Show connections"; + this.drawConnection = false + this.canvas.setDirty(true, true); + + }, + + }, + ); + // Dynamically add a submenu for all getters + this.currentGetters = this.findGetters(this.graph); + if (this.currentGetters) { + + let gettersSubmenu = this.currentGetters.map(getter => ({ + + content: `${getter.title} id: ${getter.id}`, + callback: () => { + this.canvas.centerOnNode(getter); + this.canvas.selectNode(getter, false); + this.canvas.setDirty(true, true); + + }, + })); + + options.unshift({ + content: "Getters", + has_submenu: true, + submenu: { + title: "GetNodes", + options: gettersSubmenu, + } + }); + } + } + + + onDrawForeground(ctx, lGraphCanvas) { + if (this.drawConnection) { + this._drawVirtualLinks(lGraphCanvas, ctx); + } + } + // onDrawCollapsed(ctx, lGraphCanvas) { + // if (this.drawConnection) { + // this._drawVirtualLinks(lGraphCanvas, ctx); + // } + // } + _drawVirtualLinks(lGraphCanvas, ctx) { + if (!this.currentGetters?.length) return; + var title = this.getTitle ? 
this.getTitle() : this.title; + var title_width = ctx.measureText(title).width; + if (!this.flags.collapsed) { + var start_node_slotpos = [ + this.size[0], + LiteGraph.NODE_TITLE_HEIGHT * 0.5, + ]; + } + else { + + var start_node_slotpos = [ + title_width + 55, + -15, + + ]; + } + // Provide a default link object with necessary properties, to avoid errors as link can't be null anymore + const defaultLink = { type: 'default', color: this.slotColor }; + + for (const getter of this.currentGetters) { + if (!this.flags.collapsed) { + var end_node_slotpos = this.getConnectionPos(false, 0); + end_node_slotpos = [ + getter.pos[0] - end_node_slotpos[0] + this.size[0], + getter.pos[1] - end_node_slotpos[1] + ]; + } + else { + var end_node_slotpos = this.getConnectionPos(false, 0); + end_node_slotpos = [ + getter.pos[0] - end_node_slotpos[0] + title_width + 50, + getter.pos[1] - end_node_slotpos[1] - 30 + ]; + } + lGraphCanvas.renderLink( + ctx, + start_node_slotpos, + end_node_slotpos, + defaultLink, + false, + null, + this.slotColor, + LiteGraph.RIGHT, + LiteGraph.LEFT + ); + } + } + } + + LiteGraph.registerNodeType( + "SetNode", + Object.assign(SetNode, { + title: "Set", + }) + ); + + SetNode.category = "KJNodes"; + }, +}); + +app.registerExtension({ + name: "GetNode", + registerCustomNodes() { + class GetNode extends LGraphNode { + + defaultVisibility = true; + serialize_widgets = true; + drawConnection = false; + slotColor = "#FFF"; + currentSetter = null; + canvas = app.canvas; + + constructor(title) { + super(title) + if (!this.properties) { + this.properties = {}; + } + this.properties.showOutputText = GetNode.defaultVisibility; + const node = this; + this.addWidget( + "combo", + "Constant", + "", + (e) => { + this.onRename(); + }, + { + values: () => { + const setterNodes = node.graph._nodes.filter((otherNode) => otherNode.type == 'SetNode'); + return setterNodes.map((otherNode) => otherNode.widgets[0].value).sort(); + } + } + ) + + this.addOutput("*", '*'); + this.onConnectionsChange = function( + slotType, //0 = output, 1 = input + slot, //self-explanatory + isChangeConnect, + link_info, + output + ) { + this.validateLinks(); + } + + this.setName = function(name) { + node.widgets[0].value = name; + node.onRename(); + node.serialize(); + } + + this.onRename = function() { + const setter = this.findSetter(node.graph); + if (setter) { + let linkType = (setter.inputs[0].type); + + this.setType(linkType); + this.title = (!disablePrefix ? 
"Get_" : "") + setter.widgets[0].value; + + if (app.ui.settings.getSettingValue("KJNodes.nodeAutoColor")){ + setColorAndBgColor.call(this, linkType); + } + + } else { + this.setType('*'); + } + } + + this.clone = function () { + const cloned = GetNode.prototype.clone.apply(this); + cloned.size = cloned.computeSize(); + return cloned; + }; + + this.validateLinks = function() { + if (this.outputs[0].type !== '*' && this.outputs[0].links) { + this.outputs[0].links.filter(linkId => { + const link = node.graph.links[linkId]; + return link && (!link.type.split(",").includes(this.outputs[0].type) && link.type !== '*'); + }).forEach(linkId => { + node.graph.removeLink(linkId); + }); + } + }; + + this.setType = function(type) { + this.outputs[0].name = type; + this.outputs[0].type = type; + this.validateLinks(); + } + + this.findSetter = function(graph) { + const name = this.widgets[0].value; + const foundNode = graph._nodes.find(otherNode => otherNode.type === 'SetNode' && otherNode.widgets[0].value === name && name !== ''); + return foundNode; + }; + + this.goToSetter = function() { + const setter = this.findSetter(this.graph); + this.canvas.centerOnNode(setter); + this.canvas.selectNode(setter, false); + }; + + // This node is purely frontend and does not impact the resulting prompt so should not be serialized + this.isVirtualNode = true; + } + + getInputLink(slot) { + const setter = this.findSetter(this.graph); + + if (setter) { + const slotInfo = setter.inputs[slot]; + const link = this.graph.links[slotInfo.link]; + return link; + } else { + const errorMessage = "No SetNode found for " + this.widgets[0].value + "(" + this.type + ")"; + showAlert(errorMessage); + //throw new Error(errorMessage); + } + } + onAdded(graph) { + } + getExtraMenuOptions(_, options) { + let menuEntry = this.drawConnection ? "Hide connections" : "Show connections"; + + options.unshift( + { + content: "Go to setter", + callback: () => { + this.goToSetter(); + }, + }, + { + content: menuEntry, + callback: () => { + this.currentSetter = this.findSetter(this.graph); + if (this.currentSetter.length == 0) return; + let linkType = (this.currentSetter.inputs[0].type); + this.drawConnection = !this.drawConnection; + this.slotColor = this.canvas.default_connection_color_byType[linkType] + menuEntry = this.drawConnection ? 
"Hide connections" : "Show connections"; + this.canvas.setDirty(true, true); + }, + }, + ); + } + + onDrawForeground(ctx, lGraphCanvas) { + if (this.drawConnection) { + this._drawVirtualLink(lGraphCanvas, ctx); + } + } + // onDrawCollapsed(ctx, lGraphCanvas) { + // if (this.drawConnection) { + // this._drawVirtualLink(lGraphCanvas, ctx); + // } + // } + _drawVirtualLink(lGraphCanvas, ctx) { + if (!this.currentSetter) return; + + // Provide a default link object with necessary properties, to avoid errors as link can't be null anymore + const defaultLink = { type: 'default', color: this.slotColor }; + + let start_node_slotpos = this.currentSetter.getConnectionPos(false, 0); + start_node_slotpos = [ + start_node_slotpos[0] - this.pos[0], + start_node_slotpos[1] - this.pos[1], + ]; + let end_node_slotpos = [0, -LiteGraph.NODE_TITLE_HEIGHT * 0.5]; + lGraphCanvas.renderLink( + ctx, + start_node_slotpos, + end_node_slotpos, + defaultLink, + false, + null, + this.slotColor + ); + } + } + + LiteGraph.registerNodeType( + "GetNode", + Object.assign(GetNode, { + title: "Get", + }) + ); + + GetNode.category = "KJNodes"; + }, +}); diff --git a/custom_nodes/comfyui-kjnodes/web/js/spline_editor.js b/custom_nodes/comfyui-kjnodes/web/js/spline_editor.js new file mode 100644 index 0000000000000000000000000000000000000000..cc095990a7ab48b4ea930563e0755982ddc39cf5 --- /dev/null +++ b/custom_nodes/comfyui-kjnodes/web/js/spline_editor.js @@ -0,0 +1,1379 @@ +import { app } from '../../../scripts/app.js' + +//from melmass +export function makeUUID() { + let dt = new Date().getTime() + const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { + const r = ((dt + Math.random() * 16) % 16) | 0 + dt = Math.floor(dt / 16) + return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16) + }) + return uuid +} + +export const loadScript = ( + FILE_URL, + async = true, + type = 'text/javascript', + ) => { + return new Promise((resolve, reject) => { + try { + // Check if the script already exists + const existingScript = document.querySelector(`script[src="${FILE_URL}"]`) + if (existingScript) { + resolve({ status: true, message: 'Script already loaded' }) + return + } + + const scriptEle = document.createElement('script') + scriptEle.type = type + scriptEle.async = async + scriptEle.src = FILE_URL + + scriptEle.addEventListener('load', (ev) => { + resolve({ status: true }) + }) + + scriptEle.addEventListener('error', (ev) => { + reject({ + status: false, + message: `Failed to load the script ${FILE_URL}`, + }) + }) + + document.body.appendChild(scriptEle) + } catch (error) { + reject(error) + } + }) + } + const create_documentation_stylesheet = () => { + const tag = 'kj-splineditor-stylesheet' + + let styleTag = document.head.querySelector(tag) + + if (!styleTag) { + styleTag = document.createElement('style') + styleTag.type = 'text/css' + styleTag.id = tag + styleTag.innerHTML = ` + .spline-editor { + + position: absolute; + + font: 12px monospace; + line-height: 1.5em; + padding: 10px; + z-index: 0; + overflow: hidden; + } + ` + document.head.appendChild(styleTag) + } + } + +loadScript('kjweb_async/svg-path-properties.min.js').catch((e) => { + console.log(e) +}) +loadScript('kjweb_async/protovis.min.js').catch((e) => { + console.log(e) +}) +create_documentation_stylesheet() + +function chainCallback(object, property, callback) { + if (object == undefined) { + //This should not happen. 
+ console.error("Tried to add callback to non-existant object") + return; + } + if (property in object) { + const callback_orig = object[property] + object[property] = function () { + const r = callback_orig.apply(this, arguments); + callback.apply(this, arguments); + return r + }; + } else { + object[property] = callback; + } +} +app.registerExtension({ + name: 'KJNodes.SplineEditor', + + async beforeRegisterNodeDef(nodeType, nodeData) { + if (nodeData?.name === 'SplineEditor') { + chainCallback(nodeType.prototype, "onNodeCreated", function () { + + this.widgets.find(w => w.name === "coordinates").hidden = true + + var element = document.createElement("div"); + this.uuid = makeUUID() + element.id = `spline-editor-${this.uuid}` + + this.previewMediaType = 'image' + + this.splineEditor = this.addDOMWidget(nodeData.name, "SplineEditorWidget", element, { + serialize: false, + hideOnZoom: false, + }); + + // context menu + this.contextMenu = document.createElement("div"); + this.contextMenu.className = 'spline-editor-context-menu'; + this.contextMenu.id = "context-menu"; + this.contextMenu.style.display = "none"; + this.contextMenu.style.position = "absolute"; + this.contextMenu.style.backgroundColor = "#202020"; + this.contextMenu.style.minWidth = "100px"; + this.contextMenu.style.boxShadow = "0px 8px 16px 0px rgba(0,0,0,0.2)"; + this.contextMenu.style.zIndex = "100"; + this.contextMenu.style.padding = "5px"; + + function styleMenuItem(menuItem) { + menuItem.style.display = "block"; + menuItem.style.padding = "5px"; + menuItem.style.color = "#FFF"; + menuItem.style.fontFamily = "Arial, sans-serif"; + menuItem.style.fontSize = "16px"; + menuItem.style.textDecoration = "none"; + menuItem.style.marginBottom = "5px"; + } + function createMenuItem(id, textContent) { + let menuItem = document.createElement("a"); + menuItem.href = "#"; + menuItem.id = `menu-item-${id}`; + menuItem.textContent = textContent; + styleMenuItem(menuItem); + return menuItem; + } + + // Create an array of menu items using the createMenuItem function + this.menuItems = [ + createMenuItem(0, "Toggle handles"), + createMenuItem(1, "Display sample points"), + createMenuItem(2, "Switch point shape"), + createMenuItem(3, "Background image"), + createMenuItem(4, "Invert point order"), + createMenuItem(5, "Clear Image"), + createMenuItem(6, "Add new spline"), + createMenuItem(7, "Add new single point"), + createMenuItem(8, "Delete current spline"), + createMenuItem(9, "Next spline"), + ]; + + // Add mouseover and mouseout event listeners to each menu item for styling + this.menuItems.forEach(menuItem => { + menuItem.addEventListener('mouseover', function() { + this.style.backgroundColor = "gray"; + }); + + menuItem.addEventListener('mouseout', function() { + this.style.backgroundColor = "#202020"; + }); + }); + + // Append each menu item to the context menu + this.menuItems.forEach(menuItem => { + this.contextMenu.appendChild(menuItem); + }); + + document.body.appendChild(this.contextMenu); + + this.addWidget("button", "New canvas", null, () => { + if (!this.properties || !("points" in this.properties)) { + this.editor = new SplineEditor(this); + this.addProperty("points", this.constructor.type, "string"); + } + else { + this.editor = new SplineEditor(this, true); + } + }); + + this.setSize([550, 1000]); + this.resizable = false; + this.splineEditor.parentEl = document.createElement("div"); + this.splineEditor.parentEl.className = "spline-editor"; + this.splineEditor.parentEl.id = `spline-editor-${this.uuid}` + 
element.appendChild(this.splineEditor.parentEl); + + chainCallback(this, "onConfigure", function () { + try { + this.editor = new SplineEditor(this); + } catch (error) { + console.error("An error occurred while configuring the editor:", error); + } + }); + chainCallback(this, "onExecuted", function (message) { + let bg_image = message["bg_image"]; + this.properties.imgData = { + name: "bg_image", + base64: bg_image + }; + this.editor.refreshBackgroundImage(this); + }); + + }); // onAfterGraphConfigured + }//node created + } //before register +})//register + + +class SplineEditor{ + constructor(context, reset = false) { + this.node = context; + this.reset=reset; + const self = this; + console.log("creatingSplineEditor") + + this.node.pasteFile = (file) => { + if (file.type.startsWith("image/")) { + this.handleImageFile(file); + return true; + } + return false; + }; + + this.node.onDragOver = function (e) { + if (e.dataTransfer && e.dataTransfer.items) { + return [...e.dataTransfer.items].some(f => f.kind === "file" && f.type.startsWith("image/")); + } + return false; + }; + + // On drop upload files + this.node.onDragDrop = (e) => { + console.log("onDragDrop called"); + let handled = false; + for (const file of e.dataTransfer.files) { + if (file.type.startsWith("image/")) { + this.handleImageFile(file); + handled = true; + } + } + return handled; + }; + + // context menu + this.createContextMenu(); + + + this.dotShape = "circle"; + this.drawSamplePoints = false; + + if (reset && context.splineEditor.element) { + context.splineEditor.element.innerHTML = ''; // Clear the container + } + this.coordWidget = context.widgets.find(w => w.name === "coordinates"); + this.interpolationWidget = context.widgets.find(w => w.name === "interpolation"); + this.pointsWidget = context.widgets.find(w => w.name === "points_to_sample"); + this.pointsStoreWidget = context.widgets.find(w => w.name === "points_store"); + this.tensionWidget = context.widgets.find(w => w.name === "tension"); + this.minValueWidget = context.widgets.find(w => w.name === "min_value"); + this.maxValueWidget = context.widgets.find(w => w.name === "max_value"); + this.samplingMethodWidget = context.widgets.find(w => w.name === "sampling_method"); + this.widthWidget = context.widgets.find(w => w.name === "mask_width"); + this.heightWidget = context.widgets.find(w => w.name === "mask_height"); + + this.interpolation = this.interpolationWidget.value + this.tension = this.tensionWidget.value + this.points_to_sample = this.pointsWidget.value + this.rangeMin = this.minValueWidget.value + this.rangeMax = this.maxValueWidget.value + this.pointsLayer = null; + this.samplingMethod = this.samplingMethodWidget.value + + if (this.samplingMethod == "path"||this.samplingMethod == "speed") { + this.dotShape = "triangle" + } + + + this.interpolationWidget.callback = () => { + this.interpolation = this.interpolationWidget.value + this.updatePath(); + } + this.samplingMethodWidget.callback = () => { + this.samplingMethod = this.samplingMethodWidget.value + if (this.samplingMethod == "path") { + this.dotShape = "triangle" + } + else if (this.samplingMethod == "controlpoints") { + this.dotShape = "circle" + this.drawSamplePoints = true; + } + this.updatePath(); + } + this.tensionWidget.callback = () => { + this.tension = this.tensionWidget.value + this.updatePath(); + } + this.pointsWidget.callback = () => { + this.points_to_sample = this.pointsWidget.value + this.updatePath(); + } + this.minValueWidget.callback = () => { + this.rangeMin = 
this.minValueWidget.value + this.updatePath(); + } + this.maxValueWidget.callback = () => { + this.rangeMax = this.maxValueWidget.value + this.updatePath(); + } + this.widthWidget.callback = () => { + this.width = this.widthWidget.value; + if (this.width > 256) { + context.setSize([this.width + 45, context.size[1]]); + } + this.vis.width(this.width); + this.updatePath(); +} +this.heightWidget.callback = () => { + this.height = this.heightWidget.value + this.vis.height(this.height) + context.setSize([context.size[0], this.height + 450]); + this.updatePath(); + } + this.pointsStoreWidget.callback = () => { + points = JSON.parse(this.pointsStoreWidget.value); + this.updatePath(); + } + + // Initialize or reset points array + this.drawHandles = false; + this.drawRuler = true; + var hoverIndex = -1; + var isDragging = false; + this.width = this.widthWidget.value; + this.height = this.heightWidget.value; + var i = 3; + this.splines = []; + this.activeSplineIndex = 0; // Track which spline is being edited + // init mouse position +this.lastMousePosition = { x: this.width/2, y: this.height/2 }; + + if (!reset && this.pointsStoreWidget.value != "") { + try { + const parsedData = JSON.parse(this.pointsStoreWidget.value); + // Check if it's already in the new format (array of splines) + if (Array.isArray(parsedData) && parsedData.length > 0 && parsedData[0].hasOwnProperty('points')) { + this.splines = parsedData; + } else { + // Convert old format (single array of points) to new format + this.splines = [{ + points: parsedData, + color: "#1f77b4", + name: "Spline 1" + }]; + } + } catch (e) { + console.error("Error parsing spline data:", e); + this.initializeDefaultSplines(); + } +} else { + this.initializeDefaultSplines(); + this.pointsStoreWidget.value = JSON.stringify(this.splines); +} + + this.vis = new pv.Panel() + .width(this.width) + .height(this.height) + .fillStyle("#222") + .strokeStyle("gray") + .lineWidth(2) + .antialias(false) + .margin(10) + .event("mousedown", function () { + if (pv.event.shiftKey) { // Use pv.event to access the event object + let scaledMouse = { + x: this.mouse().x / app.canvas.ds.scale, + y: this.mouse().y / app.canvas.ds.scale + }; + i = self.splines[self.activeSplineIndex].points.push(scaledMouse) - 1; + self.updatePath(); + return this; + } + else if (pv.event.ctrlKey) { + // Capture the clicked location + let clickedPoint = { + x: this.mouse().x / app.canvas.ds.scale, + y: this.mouse().y / app.canvas.ds.scale + }; + + // Find the two closest points to the clicked location + const activePoints = self.splines[self.activeSplineIndex].points; + let { point1Index, point2Index } = self.findClosestPoints(self.splines[self.activeSplineIndex].points, clickedPoint); + + // Calculate the midpoint between the two closest points + let midpoint = { + x: (activePoints[point1Index].x + activePoints[point2Index].x) / 2, + y: (activePoints[point1Index].y + activePoints[point2Index].y) / 2 + }; + + // Insert the midpoint into the array + activePoints.splice(point2Index, 0, midpoint); + i = point2Index; + self.updatePath(); + } + else if (pv.event.button === 2) { + // Store the current mouse position adjusted for scale + self.lastMousePosition = { + x: this.mouse().x / app.canvas.ds.scale, + y: this.mouse().y / app.canvas.ds.scale + }; + + self.node.contextMenu.style.display = 'block'; + self.node.contextMenu.style.left = `${pv.event.clientX}px`; + self.node.contextMenu.style.top = `${pv.event.clientY}px`; + } + }) + this.backgroundImage = this.vis.add(pv.Image).visible(false) + + 
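+    // Horizontal guide lines every 64px; drawn only while drawRuler is true
+    // (handleImageLoad switches it off once a background image is loaded).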
this.vis.add(pv.Rule) + .data(pv.range(0, this.height, 64)) + .bottom(d => d) + .strokeStyle("gray") + .lineWidth(3) + .visible(() => self.drawRuler) + + this.hoverSplineIndex = -1; + + this.splines.forEach((spline, splineIndex) => { + const strokeObj = this.vis.add(pv.Line) + .data(() => spline.points) + .left(d => d.x) + .top(d => d.y) + .interpolate(() => this.interpolation) + .tension(() => this.tension) + .segmented(() => false) + .strokeStyle("black") // Stroke color + .lineWidth(() => { + // Make stroke slightly wider than the main line + if (splineIndex === this.activeSplineIndex) return 5; + if (splineIndex === this.hoverSplineIndex) return 4; + return 3.5; + }); + + this.vis.add(pv.Line) + .data(() => spline.points) + .left(d => d.x) + .top(d => d.y) + .interpolate(() => this.interpolation) + .tension(() => this.tension) + .segmented(() => false) + .strokeStyle(spline.color) + .lineWidth(() => { + // Change line width based on active or hover state + if (splineIndex === this.activeSplineIndex) return 3; + if (splineIndex === this.hoverSplineIndex) return 2; + return 1.5; + }) + .event("mouseover", () => { + this.hoverSplineIndex = splineIndex; + this.vis.render(); + }) + .event("mouseout", () => { + this.hoverSplineIndex = -1; + this.vis.render(); + }) + .event("mousedown", () => { + if (this.activeSplineIndex !== splineIndex) { + this.activeSplineIndex = splineIndex; + this.refreshSplineElements(); + } + }); + }); + + this.vis.add(pv.Dot) + .data(() => { + const activeSpline = this.splines[this.activeSplineIndex]; + // If this is a single point, don't show it in the main visualization + if (activeSpline.isSinglePoint || (activeSpline.points && activeSpline.points.length === 1)) { + return []; // Return empty array to hide in main visualization + } + return activeSpline.points; + }) + .left(d => d.x) + .top(d => d.y) + .radius(12) + .shape(function() { + return self.dotShape; + }) + .angle(function() { + const index = this.index; + let angle = 0; + + if (self.dotShape === "triangle") { + const activePoints = self.splines[self.activeSplineIndex].points; + let dxNext = 0, dyNext = 0; + if (index < activePoints.length - 1) { + dxNext = activePoints[index + 1].x - activePoints[index].x; + dyNext = activePoints[index + 1].y - activePoints[index].y; + } + + let dxPrev = 0, dyPrev = 0; + if (index > 0) { + dxPrev = activePoints[index].x - activePoints[index - 1].x; + dyPrev = activePoints[index].y - activePoints[index - 1].y; + } + + const dx = (dxNext + dxPrev) / 2; + const dy = (dyNext + dyPrev) / 2; + + angle = Math.atan2(dy, dx); + angle -= Math.PI / 2; + angle = (angle + 2 * Math.PI) % (2 * Math.PI); + } + + return angle; + }) + .cursor("move") + .strokeStyle(function () { return i == this.index ? 
"#ff7f0e" : "#1f77b4"; }) + .fillStyle(function () { return "rgba(100, 100, 100, 0.3)"; }) + .event("mousedown", pv.Behavior.drag()) + .event("dragstart", function () { + i = this.index; + hoverIndex = this.index; + isDragging = true; + const activePoints = self.splines[self.activeSplineIndex].points; + if (pv.event.button === 2 && i !== 0 && i !== activePoints.length - 1) { + activePoints.splice(i--, 1); + self.vis.render(); + } + return this; + }) + .event("dragend", function() { + if (this.pathElements !== null) { + self.updatePath(); + } + isDragging = false; + }) + .event("drag", function () { + let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor + let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor + // Determine the bounds of the vis.Panel + const panelWidth = self.vis.width(); + const panelHeight = self.vis.height(); + + // Adjust the new position if it would place the dot outside the bounds of the vis.Panel + adjustedX = Math.max(0, Math.min(panelWidth, adjustedX)); + adjustedY = Math.max(0, Math.min(panelHeight, adjustedY)); + self.splines[self.activeSplineIndex].points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position + self.vis.render(); // Re-render the visualization to reflect the new position + }) + .event("mouseover", function() { + hoverIndex = this.index; // Set the hover index to the index of the hovered dot + self.vis.render(); // Re-render the visualization + }) + .event("mouseout", function() { + !isDragging && (hoverIndex = -1); // Reset the hover index when the mouse leaves the dot + self.vis.render(); // Re-render the visualization + }) + .anchor("center") + .add(pv.Label) + .visible(function() { + return hoverIndex === this.index; // Only show the label for the hovered dot + }) + .left(d => d.x < this.width / 2 ? d.x + 80 : d.x - 70) // Shift label to right if on left half, otherwise shift to left + .top(d => d.y < this.height / 2 ? d.y + 20 : d.y - 20) // Shift label down if on top half, otherwise shift up + .font(12 + "px sans-serif") + .text(d => { + if (this.samplingMethod == "path") { + return `X: ${Math.round(d.x)}, Y: ${Math.round(d.y)}`; + } else { + let frame = Math.round((d.x / self.width) * self.points_to_sample); + let normalizedY = (1.0 - (d.y / self.height) - 0.0) * (self.rangeMax - self.rangeMin) + self.rangeMin; + let normalizedX = (d.x / self.width); + return `F: ${frame}, X: ${normalizedX.toFixed(2)}, Y: ${normalizedY.toFixed(2)}`; + } + }) + .textStyle("orange") + + // single points + this.vis.add(pv.Dot) + .data(() => { + // Collect all single points from all splines + const singlePoints = []; + this.splines.forEach((spline, splineIndex) => { + if (spline.isSinglePoint || (spline.points && spline.points.length === 1)) { + singlePoints.push({ + x: spline.points[0].x, + y: spline.points[0].y, + splineIndex: splineIndex, + color: spline.color + }); + } + }); + return singlePoints; + }) + .left(d => d.x) + .top(d => d.y) + .radius(6) + .shape("square") + .strokeStyle(d => d.splineIndex === this.activeSplineIndex ? "#ff7f0e" : d.color) + .fillStyle(d => "rgba(100, 100, 100, 0.9)") + .lineWidth(d => d.splineIndex === this.activeSplineIndex ? 
3 : 1.5) + .cursor("move") + .event("mousedown", pv.Behavior.drag()) + .event("dragstart", function(d) { + self.activeSplineIndex = d.splineIndex; + self.refreshSplineElements(); + return this; + }) + .event("drag", function(d) { + let adjustedX = this.mouse().x / app.canvas.ds.scale; + let adjustedY = this.mouse().y / app.canvas.ds.scale; + + // Determine the bounds of the vis.Panel + const panelWidth = self.vis.width(); + const panelHeight = self.vis.height(); + + // Adjust the new position if it would place the dot outside the bounds + adjustedX = Math.max(0, Math.min(panelWidth, adjustedX)); + adjustedY = Math.max(0, Math.min(panelHeight, adjustedY)); + + // Update the point position + const spline = self.splines[d.splineIndex]; + spline.points[0] = { x: adjustedX, y: adjustedY }; + + // For single points, we need to refresh the entire spline element + // to prevent the line-drawing effect + + }) + .event("dragend", function(d) { + self.refreshSplineElements(); + self.updatePath(); + }) + .visible(d => true); // Make always visible + + if (this.splines.length != 0) { + this.vis.render(); + } + var svgElement = this.vis.canvas(); + svgElement.style['zIndex'] = "2" + svgElement.style['position'] = "relative" + this.node.splineEditor.element.appendChild(svgElement); + this.pathElements = svgElement.getElementsByTagName('path'); // Get all path elements + + if (this.width > 256) { + this.node.setSize([this.width + 45, this.node.size[1]]); + } + this.node.setSize([this.node.size[0], this.height + 450]); + this.updatePath(); + this.refreshBackgroundImage(); + } + + updatePath = () => { + if (!this.splines || this.splines.length === 0) { + console.log("no splines"); + return; + } + // Get active spline points + console.log("this.activeSplineIndex", this.activeSplineIndex); + const activeSpline = this.splines[this.activeSplineIndex]; + const activePoints = activeSpline.points; + + if (!activePoints || activePoints.length === 0) { + console.log("no points in active spline"); + return; + } + + + let coords; + if (this.samplingMethod != "controlpoints") { + coords = this.samplePoints(this.pathElements[this.activeSplineIndex], this.points_to_sample, this.samplingMethod, this.width, this.activeSplineIndex); + } else { + coords = activePoints; + } + + let allSplineCoords = []; + for (let i = 0; i < this.splines.length; i++) { + // Use the same sampling method for all splines + let splineCoords; + const pathElement = this.pathElements[i]; + + if (this.samplingMethod != "controlpoints" && pathElement) { + splineCoords = this.samplePoints(pathElement, this.points_to_sample, this.samplingMethod, this.width, i); + } else { + // Fall back to control points if no path element or sampling method is "controlpoints" + splineCoords = this.splines[i].points; + } + + allSplineCoords.push(splineCoords); + } + + if (this.drawSamplePoints) { + if (this.pointsLayer) { + // Update the data of the existing points layer + this.pointsLayer.data(coords); + } else { + // Create the points layer if it doesn't exist + this.pointsLayer = this.vis.add(pv.Dot) + .data(coords) + .left(function(d) { return d.x; }) + .top(function(d) { return d.y; }) + .radius(5) // Adjust the radius as needed + .fillStyle("red") // Change the color as needed + .strokeStyle("black") // Change the stroke color as needed + .lineWidth(1); // Adjust the line width as needed + } + } else { + if (this.pointsLayer) { + // Remove the points layer + this.pointsLayer.data([]); + this.vis.render(); + } + } + this.pointsStoreWidget.value = 
JSON.stringify(this.splines);
+    if (this.coordWidget) {
+      this.coordWidget.value = JSON.stringify(allSplineCoords);
+    }
+    this.vis.render();
+  };
+
+  handleImageLoad = (img, file, base64String) => {
+    //console.log(img.width, img.height); // Access width and height here
+    this.widthWidget.value = img.width;
+    this.heightWidget.value = img.height;
+    this.drawRuler = false;
+
+    if (img.width != this.vis.width() || img.height != this.vis.height()) {
+      if (img.width > 256) {
+        this.node.setSize([img.width + 45, this.node.size[1]]);
+      }
+      this.node.setSize([this.node.size[0], img.height + 520]);
+      this.vis.width(img.width);
+      this.vis.height(img.height);
+      this.height = img.height;
+      this.width = img.width;
+
+      this.updatePath();
+    }
+    this.backgroundImage.url(file ? URL.createObjectURL(file) : `data:${this.node.properties.imgData.type};base64,${base64String}`).visible(true).root.render();
+  };
+
+  processImage = (img, file) => {
+    const canvas = document.createElement('canvas');
+    const ctx = canvas.getContext('2d');
+
+    const maxWidth = 800; // maximum width
+    const maxHeight = 600; // maximum height
+    let width = img.width;
+    let height = img.height;
+
+    // Calculate the new dimensions while preserving the aspect ratio
+    if (width > height) {
+      if (width > maxWidth) {
+        height *= maxWidth / width;
+        width = maxWidth;
+      }
+    } else {
+      if (height > maxHeight) {
+        width *= maxHeight / height;
+        height = maxHeight;
+      }
+    }
+
+    canvas.width = width;
+    canvas.height = height;
+    ctx.drawImage(img, 0, 0, width, height);
+
+    // Get the compressed image data as a Base64 string
+    const base64String = canvas.toDataURL('image/jpeg', 0.5).replace('data:', '').replace(/^.+,/, ''); // 0.5 is the quality from 0 to 1
+
+    this.node.properties.imgData = {
+      name: file.name,
+      lastModified: file.lastModified,
+      size: file.size,
+      type: file.type,
+      base64: base64String
+    };
+    this.handleImageLoad(img, file, base64String); // call via `this`: handleImageLoad is a class field, not a free function
+  };
+
+  handleImageFile = (file) => {
+    const reader = new FileReader();
+    reader.onloadend = () => {
+      const img = new Image();
+      img.src = reader.result;
+      img.onload = () => this.processImage(img, file); // call via `this`: processImage is a class field, not a free function
+    };
+    reader.readAsDataURL(file);
+
+    const imageUrl = URL.createObjectURL(file);
+    const img = new Image();
+    img.src = imageUrl;
+    img.onload = () => this.handleImageLoad(img, file, null);
+  };
+
+  refreshBackgroundImage = () => {
+    if (this.node.properties.imgData && this.node.properties.imgData.base64) {
+      const base64String = this.node.properties.imgData.base64;
+      const imageUrl = `data:${this.node.properties.imgData.type};base64,${base64String}`;
+      const img = new Image();
+      img.src = imageUrl;
+      img.onload = () => this.handleImageLoad(img, null, base64String);
+    }
+  };
+
+  refreshSplineElements = () => {
+    // Clear existing line elements and recreate them
+    const svgElement = this.vis.canvas();
+
+    // Remove all existing line elements
+    const oldLines = svgElement.querySelectorAll('path');
+    oldLines.forEach(line => line.remove());
+
+    this.pathElements = [];
+    this.lineObjects = [];
+
+    const originalChildren = [...this.vis.children];
+
+    // Find line objects to remove (those that represent splines)
+    const linesToRemove = originalChildren.filter(child =>
+      child instanceof pv.Line
+    );
+    linesToRemove.forEach(line => line.visible(false));
+
+    // Re-add all spline lines and store references to them
+    this.splines.forEach((spline, splineIndex) => {
+      // For single points, we need special handling
+      if (spline.isSinglePoint || (spline.points && spline.points.length === 1)) {
+        const point =
spline.points[0]; + // For single points, create a tiny line at the same point + // This ensures we have a path element for the point + const lineObj = this.vis.add(pv.Line) + .data([point, {x: point.x + 0.001, y: point.y + 0.001}]) + .left(d => d.x) + .top(d => d.y) + .strokeStyle(spline.color) + .lineWidth(() => { + if (splineIndex === this.activeSplineIndex) return 3; + if (splineIndex === this.hoverSplineIndex) return 2; + return 1.5; + }) + .event("mouseover", () => { + this.hoverSplineIndex = splineIndex; + this.vis.render(); + }) + .event("mouseout", () => { + this.hoverSplineIndex = -1; + this.vis.render(); + }) + .event("mousedown", () => { + if (this.activeSplineIndex !== splineIndex) { + this.activeSplineIndex = splineIndex; + this.refreshSplineElements(); + } + }); + this.lineObjects.push(lineObj); + } else { + // For normal multi-point splines + const strokeObj = this.vis.add(pv.Line) + .data(() => spline.points) + .left(d => d.x) + .top(d => d.y) + .interpolate(() => this.interpolation) + .tension(() => this.tension) + .segmented(() => false) + .strokeStyle("black") // Stroke color + .lineWidth(() => { + // Make stroke slightly wider than the main line + if (splineIndex === this.activeSplineIndex) return 5; + if (splineIndex === this.hoverSplineIndex) return 4; + return 3.5; + }); + const lineObj = this.vis.add(pv.Line) + .data(() => spline.points) + .left(d => d.x) + .top(d => d.y) + .interpolate(() => this.interpolation) + .tension(() => this.tension) + .segmented(() => false) + .strokeStyle(spline.color) + .lineWidth(() => { + if (splineIndex === this.activeSplineIndex) return 3; + if (splineIndex === this.hoverSplineIndex) return 2; + return 1.5; + }) + .event("mouseover", () => { + this.hoverSplineIndex = splineIndex; + this.vis.render(); + }) + .event("mouseout", () => { + this.hoverSplineIndex = -1; + this.vis.render(); + }) + .event("mousedown", () => { + if (this.activeSplineIndex !== splineIndex) { + this.activeSplineIndex = splineIndex; + this.refreshSplineElements(); + } + }); + + // // Add invisible wider hit area for easier selection + // this.vis.add(pv.Line) + // .data(() => spline.points) + // .left(d => d.x) + // .top(d => d.y) + // .interpolate(() => this.interpolation) + // .tension(() => this.tension) + // .segmented(() => false) + // .strokeStyle("rgba(0,0,0,0.01)") // Nearly invisible + // .lineWidth(15) // Much wider hit area + // .event("mouseover", () => { + // this.hoverSplineIndex = splineIndex; + // this.vis.render(); + // }) + // .event("mouseout", () => { + // this.hoverSplineIndex = -1; + // this.vis.render(); + // }) + // .event("mousedown", () => { + // if (pv.event.shiftKey) { + // if (this.activeSplineIndex !== splineIndex) { + // this.activeSplineIndex = splineIndex; + // this.refreshSplineElements(); + // } + // }} + // ); + + this.lineObjects.push(lineObj); + } + }); + + this.vis.render(); + + requestAnimationFrame(() => { + const allPaths = Array.from(svgElement.querySelectorAll('path')); + this.pathElements = []; + + // First try: look at paths with specific childIndex values + this.lineObjects.forEach((lineObj, i) => { + // Find paths that correspond to our line objects + const childIndex = lineObj.childIndex; + const matchingPath = allPaths.find(path => + path.$scene && path.$scene.scenes && + path.$scene.scenes.childIndex === childIndex + ); + + if (matchingPath) { + //console.log("matchingPath:", matchingPath); + this.pathElements[i] = matchingPath; + } + }); + + // Check if we found all paths + if (this.pathElements.filter(p => 
p).length !== this.splines.length) { + // Fallback to color matching + this.pathElements = []; + for (let i = 0; i < this.splines.length; i++) { + const color = this.splines[i].color; + const matchingPath = allPaths.find(p => + p.getAttribute('style')?.includes(color) && + !this.pathElements.includes(p) + ); + + if (matchingPath) { + this.pathElements[i] = matchingPath; + } + } + } + + // If we still don't have the right number of paths, use the first N paths + if (this.pathElements.filter(p => p).length !== this.splines.length) { + this.pathElements = allPaths.slice(0, this.splines.length); + } + + this.updatePath(); + }); + }; + + + initializeDefaultSplines() { + this.splines = [{ + points: pv.range(1, 4).map((i, index) => { + if (index === 0) { + return { x: 0, y: this.height }; + } else if (index === 2) { + return { x: this.width, y: 0 }; + } else { + return { + x: i * this.width / 5, + y: 50 + Math.random() * (this.height - 100) + }; + } + }), + color: this.getSplineColor(0), + name: "Spline 1" + }]; + } + + getSplineColor(index) { + const colors = [ + "#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", + "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", + "#bcbd22", "#17becf" + ]; + return colors[index % colors.length]; + } + + createContextMenu = () => { + const self = this; + const oldMenu = this.node.contextMenu; + const newMenu = oldMenu.cloneNode(true); + oldMenu.parentNode.replaceChild(newMenu, oldMenu); + this.node.contextMenu = newMenu; + + document.addEventListener('contextmenu', function (e) { + e.preventDefault(); + }); + + document.addEventListener('click', function (e) { + document.querySelectorAll('.spline-editor-context-menu').forEach(menu => { + menu.style.display = 'none'; + }); + }); + + this.node.contextMenu.addEventListener('click', function(e) { + e.preventDefault(); + if (e.target.tagName === 'A') { + const id = parseInt(e.target.id.split('-')[2]); + + switch(id) { + case 0: + e.preventDefault(); + if (!self.drawHandles) { + self.drawHandles = true + self.vis.add(pv.Line) + .data(() => self.splines[self.activeSplineIndex].points.map((point, index) => ({ + start: point, + end: [index] + }))) + .left(d => d.start.x) + .top(d => d.start.y) + .interpolate("linear") + .tension(0) // Straight lines + .strokeStyle("#ff7f0e") // Same color as control points + .lineWidth(1) + .visible(() => self.drawHandles); + self.vis.render(); + } else { + self.drawHandles = false + self.vis.render(); + } + self.node.contextMenu.style.display = 'none'; + break; + case 1: + + self.drawSamplePoints = !self.drawSamplePoints; + self.updatePath(); + break; + case 2: + if (self.dotShape == "circle"){ + self.dotShape = "triangle" + } + else { + self.dotShape = "circle" + } + self.updatePath(); + break; + case 3: + // Create file input element + const fileInput = document.createElement('input'); + fileInput.type = 'file'; + fileInput.accept = 'image/*'; // Accept only image files + + // Listen for file selection + fileInput.addEventListener('change', function (event) { + const file = event.target.files[0]; // Get the selected file + + if (file) { + const imageUrl = URL.createObjectURL(file); + let img = new Image(); + img.src = imageUrl; + img.onload = () => self.handleImageLoad(img, file, null); + } + }); + + fileInput.click(); + + self.node.contextMenu.style.display = 'none'; + break; + case 4: + self.splines[self.activeSplineIndex].points.reverse(); + self.updatePath(); + break; + case 5: + self.backgroundImage.visible(false).root.render(); + self.node.properties.imgData = null; + 
self.node.contextMenu.style.display = 'none'; + break; + case 6: // Add new spline + const newSplineIndex = self.splines.length; + self.splines.push({ + points: [ + // Create default points for the new spline + { x: 0, y: self.height }, + { x: self.width/2, y: self.height/2 }, + { x: self.width, y: 0 } + ], + color: self.getSplineColor(newSplineIndex), + name: `Spline ${newSplineIndex + 1}` + }); + self.activeSplineIndex = newSplineIndex; + self.refreshSplineElements(); + self.node.contextMenu.style.display = 'none'; + break; + case 7: // Add new single point + const newSingleSplineIndex = self.splines.length; + self.splines.push({ + points: [ + { x: self.lastMousePosition.x, y: self.lastMousePosition.y }, + ], + color: self.getSplineColor(newSingleSplineIndex), + name: `Spline ${newSingleSplineIndex + 1}`, + isSinglePoint: true + }); + self.activeSplineIndex = newSingleSplineIndex; + self.refreshSplineElements(); + self.node.contextMenu.style.display = 'none'; + break; + case 8: // Delete current spline + if (self.splines.length > 1) { + self.splines.splice(self.activeSplineIndex, 1); + self.activeSplineIndex = Math.min(self.activeSplineIndex, self.splines.length - 1); + self.refreshSplineElements(); + } + self.node.contextMenu.style.display = 'none'; + break; + case 9: // Next spline + self.activeSplineIndex = (self.activeSplineIndex + 1) % self.splines.length; + self.refreshSplineElements(); + self.node.contextMenu.style.display = 'none'; + break; + } + } + }); + } + + samplePoints(svgPathElement, numSamples, samplingMethod, width, splineIndex) { + const spline = this.splines[splineIndex]; + + // Check if this is a single point spline + if (spline && (spline.isSinglePoint || (spline.points && spline.points.length === 1))) { + // For a single point, return an array with the same coordinates repeated + const point = spline.points[0]; + return Array(numSamples).fill().map(() => ({ x: point.x, y: point.y })); + } + + if (!svgPathElement) { + console.warn(`Path element not found for spline index: ${splineIndex}. 
Available paths: ${this.pathElements.length}`); + + + const splinePoints = this.splines[splineIndex].points; + + // If we have no points, return an empty array + if (!splinePoints || splinePoints.length === 0) { + return []; + } + + // Create a simple interpolation between control points + const result = []; + for (let i = 0; i < numSamples; i++) { + const t = i / (numSamples - 1); + const idx = Math.min( + Math.floor(t * (splinePoints.length - 1)), + splinePoints.length - 2 + ); + const fraction = (t * (splinePoints.length - 1)) - idx; + + const x = splinePoints[idx].x + fraction * (splinePoints[idx + 1].x - splinePoints[idx].x); + const y = splinePoints[idx].y + fraction * (splinePoints[idx + 1].y - splinePoints[idx].y); + + result.push({ x, y }); + } + return result; + } + + var svgWidth = width; // Fixed width of the SVG element + var pathLength = svgPathElement.getTotalLength(); + var points = []; + + if (samplingMethod === "speed") { + // Calculate control point distances along the path + const controlPoints = this.splines[splineIndex].points; + const pathPositions = []; + + // Find approximate path positions for each control point + for (const cp of controlPoints) { + let bestDist = Infinity; + let bestPos = 0; + + // Sample the path to find closest point to each control point + for (let pos = 0; pos <= pathLength; pos += pathLength / 100) { + const pt = svgPathElement.getPointAtLength(pos); + const dist = Math.sqrt(Math.pow(pt.x - cp.x, 2) + Math.pow(pt.y - cp.y, 2)); + + if (dist < bestDist) { + bestDist = dist; + bestPos = pos; + } + } + pathPositions.push(bestPos); + } + + // Sort positions along path + pathPositions.sort((a, b) => a - b); + + // Create a smooth speed mapping function with synchronization + const createSynchronizedMapping = () => { + // Calculate segment lengths and densities + const segments = []; + let totalLength = pathPositions[pathPositions.length - 1] - pathPositions[0]; + + for (let i = 0; i < pathPositions.length - 1; i++) { + const segLength = pathPositions[i+1] - pathPositions[i]; + // Inverse relationship - shorter segments = higher density = slower speed + const density = 1 / Math.max(segLength, 0.0001); + segments.push({ + position: pathPositions[i], + length: segLength, + density: density + }); + } + + // Create mapping function with forced synchronization at endpoints + return t => { + // Force synchronization at t=0 and t=1 + if (t === 0) return 0; + if (t === 1) return pathLength; + + // For intermediate points, use the speed control + // Scale t to fit between first and last control points + const firstPos = pathPositions[0]; + const lastPos = pathPositions[pathPositions.length - 1]; + + // Create a density-weighted position mapping + let totalWeight = 0; + let weights = []; + + for (let i = 0; i < segments.length; i++) { + totalWeight += segments[i].density; + weights.push(segments[i].density); + } + + // Normalize weights + const normalizedWeights = weights.map(w => w / totalWeight); + + // Calculate cumulative weights + let cumulativeWeight = 0; + const cumulativeWeights = normalizedWeights.map(w => { + cumulativeWeight += w; + return cumulativeWeight; + }); + + // Find the segment for this t value + let segmentIndex = 0; + for (let i = 0; i < cumulativeWeights.length; i++) { + if (t <= cumulativeWeights[i]) { + segmentIndex = i; + break; + } + } + + // Calculate position within segment + const segmentStart = segmentIndex > 0 ? 
cumulativeWeights[segmentIndex - 1] : 0; + const segmentEnd = cumulativeWeights[segmentIndex]; + const segmentT = (t - segmentStart) / (segmentEnd - segmentStart); + + // Map to path position + const pathStart = pathPositions[segmentIndex]; + const pathEnd = pathPositions[segmentIndex + 1]; + const pos = pathStart + segmentT * (pathEnd - pathStart); + + // Scale to fill entire path + return pos; + }; + }; + + const mapToPath = createSynchronizedMapping(); + + // Sample using the synchronized mapping function + for (let i = 0; i < numSamples; i++) { + const t = i / (numSamples - 1); + const pathPos = mapToPath(t); + const point = svgPathElement.getPointAtLength(pathPos); + points.push({ x: point.x, y: point.y }); + } + + return points; + + } + else{ + for (var i = 0; i < numSamples; i++) { + if (samplingMethod === "time") { + // Calculate the x-coordinate for the current sample based on the SVG's width + var x = (svgWidth / (numSamples - 1)) * i; + // Find the point on the path that intersects the vertical line at the calculated x-coordinate + var point = this.findPointAtX(svgPathElement, x, pathLength); + } + else if (samplingMethod === "path") { + // Calculate the distance along the path for the current sample + var distance = (pathLength / (numSamples - 1)) * i; + // Get the point at the current distance + var point = svgPathElement.getPointAtLength(distance); + } + + // Add the point to the array of points + points.push({ x: point.x, y: point.y }); + } + return points; + } + } + + findClosestPoints(points, clickedPoint) { + // Calculate distances from clickedPoint to each point in the array + let distances = points.map(point => { + let dx = clickedPoint.x - point.x; + let dy = clickedPoint.y - point.y; + return { index: points.indexOf(point), distance: Math.sqrt(dx * dx + dy * dy) }; + }); + // Sort distances and get the indices of the two closest points + let sortedDistances = distances.sort((a, b) => a.distance - b.distance); + let closestPoint1Index = sortedDistances[0].index; + let closestPoint2Index = sortedDistances[1].index; + // Ensure point1Index is always the smaller index + if (closestPoint1Index > closestPoint2Index) { + [closestPoint1Index, closestPoint2Index] = [closestPoint2Index, closestPoint1Index]; + } + return { point1Index: closestPoint1Index, point2Index: closestPoint2Index }; + } + + findPointAtX(svgPathElement, targetX, pathLength) { + let low = 0; + let high = pathLength; + let bestPoint = svgPathElement.getPointAtLength(0); + + while (low <= high) { + let mid = low + (high - low) / 2; + let point = svgPathElement.getPointAtLength(mid); + + if (Math.abs(point.x - targetX) < 1) { + return point; // The point is close enough to the target + } + + if (point.x < targetX) { + low = mid + 1; + } else { + high = mid - 1; + } + + // Keep track of the closest point found so far + if (Math.abs(point.x - targetX) < Math.abs(bestPoint.x - targetX)) { + bestPoint = point; + } + } + + // Return the closest point found + return bestPoint; + } +} \ No newline at end of file diff --git a/custom_nodes/comfyui-kjnodes/web/red.png b/custom_nodes/comfyui-kjnodes/web/red.png new file mode 100644 index 0000000000000000000000000000000000000000..4352c118b2c5fa6f33edc4d99a5e4d22649ff827 Binary files /dev/null and b/custom_nodes/comfyui-kjnodes/web/red.png differ diff --git a/custom_nodes/example_node.py.example b/custom_nodes/example_node.py.example new file mode 100644 index 0000000000000000000000000000000000000000..29ab2aa72319354b147b7dd79e1c3179e54d3d06 --- /dev/null +++ 
b/custom_nodes/example_node.py.example
@@ -0,0 +1,155 @@
+class Example:
+    """
+    An example node
+
+    Class methods
+    -------------
+    INPUT_TYPES (dict):
+        Tells the main program the input parameters of the node.
+    IS_CHANGED:
+        Optional method to control when the node is re-executed.
+
+    Attributes
+    ----------
+    RETURN_TYPES (`tuple`):
+        The type of each element in the output tuple.
+    RETURN_NAMES (`tuple`):
+        Optional: The name of each output in the output tuple.
+    FUNCTION (`str`):
+        The name of the entry-point method. For example, if `FUNCTION = "execute"` then it will run Example().execute()
+    OUTPUT_NODE ([`bool`]):
+        If this node is an output node that outputs a result/image from the graph. The SaveImage node is an example.
+        The backend iterates on these output nodes and tries to execute all their parents if their parent graph is properly connected.
+        Assumed to be False if not present.
+    CATEGORY (`str`):
+        The category the node should appear in the UI.
+    DEPRECATED (`bool`):
+        Indicates whether the node is deprecated. Deprecated nodes are hidden by default in the UI, but remain
+        functional in existing workflows that use them.
+    EXPERIMENTAL (`bool`):
+        Indicates whether the node is experimental. Experimental nodes are marked as such in the UI and may be subject to
+        significant changes or removal in future versions. Use with caution in production workflows.
+    execute(s) -> tuple || None:
+        The entry-point method. The name of this method must be the same as the value of property `FUNCTION`.
+        For example, if `FUNCTION = "execute"` then this method's name must be `execute`, if `FUNCTION = "foo"` then it must be `foo`.
+    """
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        """
+        Return a dictionary which contains config for all input fields.
+        Some types (string): "MODEL", "VAE", "CLIP", "CONDITIONING", "LATENT", "IMAGE", "INT", "STRING", "FLOAT".
+        Input types "INT", "STRING" or "FLOAT" are special values for fields on the node.
+        The type can be a list for selection.
+
+        Returns: `dict`:
+            - Key input_fields_group (`string`): Can be either required, hidden or optional. A node class must have property `required`
+            - Value input_fields (`dict`): Contains input fields config:
+                * Key field_name (`string`): Name of an entry-point method's argument
+                * Value field_config (`tuple`):
+                    + First value is a string indicating the type of field or a list for selection.
+                    + Second value is a config for type "INT", "STRING" or "FLOAT".
+        """
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "int_field": ("INT", {
+                    "default": 0,
+                    "min": 0, #Minimum value
+                    "max": 4096, #Maximum value
+                    "step": 64, #Slider's step
+                    "display": "number", # Cosmetic only: display as "number" or "slider"
+                    "lazy": True # Will only be evaluated if check_lazy_status requires it
+                }),
+                "float_field": ("FLOAT", {
+                    "default": 1.0,
+                    "min": 0.0,
+                    "max": 10.0,
+                    "step": 0.01,
+                    "round": 0.001, #The value representing the precision to round to, will be set to the step value by default. Can be set to False to disable rounding.
+ "display": "number", + "lazy": True + }), + "print_to_screen": (["enable", "disable"],), + "string_field": ("STRING", { + "multiline": False, #True if you want the field to look like the one on the ClipTextEncode node + "default": "Hello World!", + "lazy": True + }), + }, + } + + RETURN_TYPES = ("IMAGE",) + #RETURN_NAMES = ("image_output_name",) + + FUNCTION = "test" + + #OUTPUT_NODE = False + + CATEGORY = "Example" + + def check_lazy_status(self, image, string_field, int_field, float_field, print_to_screen): + """ + Return a list of input names that need to be evaluated. + + This function will be called if there are any lazy inputs which have not yet been + evaluated. As long as you return at least one field which has not yet been evaluated + (and more exist), this function will be called again once the value of the requested + field is available. + + Any evaluated inputs will be passed as arguments to this function. Any unevaluated + inputs will have the value None. + """ + if print_to_screen == "enable": + return ["int_field", "float_field", "string_field"] + else: + return [] + + def test(self, image, string_field, int_field, float_field, print_to_screen): + if print_to_screen == "enable": + print(f"""Your input contains: + string_field aka input text: {string_field} + int_field: {int_field} + float_field: {float_field} + """) + #do some processing on the image, in this example I just invert it + image = 1.0 - image + return (image,) + + """ + The node will always be re executed if any of the inputs change but + this method can be used to force the node to execute again even when the inputs don't change. + You can make this node return a number or a string. This value will be compared to the one returned the last time the node was + executed, if it is different the node will be executed again. + This method is used in the core repo for the LoadImage node where they return the image hash as a string, if the image hash + changes between executions the LoadImage node is executed again. + """ + #@classmethod + #def IS_CHANGED(s, image, string_field, int_field, float_field, print_to_screen): + # return "" + +# Set the web directory, any .js file in that directory will be loaded by the frontend as a frontend extension +# WEB_DIRECTORY = "./somejs" + + +# Add custom API routes, using router +from aiohttp import web +from server import PromptServer + +@PromptServer.instance.routes.get("/hello") +async def get_hello(request): + return web.json_response("hello") + + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "Example": Example +} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "Example": "Example Node" +} diff --git a/custom_nodes/websocket_image_save.py b/custom_nodes/websocket_image_save.py new file mode 100644 index 0000000000000000000000000000000000000000..15f87f9f56175f33df18c6142f9e13c4503b1186 --- /dev/null +++ b/custom_nodes/websocket_image_save.py @@ -0,0 +1,44 @@ +from PIL import Image +import numpy as np +import comfy.utils +import time + +#You can use this node to save full size images through the websocket, the +#images will be sent in exactly the same format as the image previews: as +#binary images on the websocket with a 8 byte header indicating the type +#of binary message (first 4 bytes) and the image format (next 4 bytes). + +#Note that no metadata will be put in the images saved with this node. 
+ +class SaveImageWebsocket: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ),} + } + + RETURN_TYPES = () + FUNCTION = "save_images" + + OUTPUT_NODE = True + + CATEGORY = "api/image" + + def save_images(self, images): + pbar = comfy.utils.ProgressBar(images.shape[0]) + step = 0 + for image in images: + i = 255. * image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + pbar.update_absolute(step, images.shape[0], ("PNG", img, None)) + step += 1 + + return {} + + @classmethod + def IS_CHANGED(s, images): + return time.time() + +NODE_CLASS_MAPPINGS = { + "SaveImageWebsocket": SaveImageWebsocket, +}