	Upload 1425 files
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +25 -0
- custom_nodes/comfyui_controlnet_aux/LICENSE.txt +201 -0
- custom_nodes/comfyui_controlnet_aux/NotoSans-Regular.ttf +3 -0
- custom_nodes/comfyui_controlnet_aux/README.md +252 -0
- custom_nodes/comfyui_controlnet_aux/UPDATES.md +44 -0
- custom_nodes/comfyui_controlnet_aux/__init__.py +214 -0
- custom_nodes/comfyui_controlnet_aux/config.example.yaml +20 -0
- custom_nodes/comfyui_controlnet_aux/dev_interface.py +6 -0
- custom_nodes/comfyui_controlnet_aux/examples/CNAuxBanner.jpg +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/ExecuteAll.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/ExecuteAll1.jpg +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/ExecuteAll2.jpg +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/comfyui-controlnet-aux-logo.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_animal_pose.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_anime_face_segmentor.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_anyline.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_densepose.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_depth_anything.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_depth_anything_v2.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_dsine.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_marigold.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_marigold_flat.jpg +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_mesh_graphormer.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_metric3d.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_onnx.png +0 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_recolor.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_save_kps.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_teed.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_torchscript.png +3 -0
- custom_nodes/comfyui_controlnet_aux/examples/example_unimatch.png +3 -0
- custom_nodes/comfyui_controlnet_aux/hint_image_enchance.py +233 -0
- custom_nodes/comfyui_controlnet_aux/install.bat +20 -0
- custom_nodes/comfyui_controlnet_aux/log.py +80 -0
- custom_nodes/comfyui_controlnet_aux/lvminthin.py +87 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/anime_face_segment.py +43 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/anyline.py +87 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/binary.py +29 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/canny.py +30 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/color.py +26 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/densepose.py +31 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/depth_anything.py +55 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/depth_anything_v2.py +56 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/diffusion_edge.py +41 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/dsine.py +31 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/dwpose.py +162 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/hed.py +53 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/inpaint.py +32 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/leres.py +32 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/lineart.py +30 -0
- custom_nodes/comfyui_controlnet_aux/node_wrappers/lineart_anime.py +27 -0
    	
.gitattributes
CHANGED
@@ -42,3 +42,28 @@ custom_nodes/ComfyQR/example_generations/unscannable_00001_fixed_.png filter=lfs
 custom_nodes/ComfyQR/img/for_deletion/badgers_levels_adjusted.png filter=lfs diff=lfs merge=lfs -text
 custom_nodes/ComfyQR/img/for_deletion/badgers.png filter=lfs diff=lfs merge=lfs -text
 custom_nodes/ComfyQR/img/node-mask-qr-errors.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/CNAuxBanner.jpg filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/comfyui-controlnet-aux-logo.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_animal_pose.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_anime_face_segmentor.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_anyline.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_densepose.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_depth_anything_v2.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_depth_anything.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_dsine.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_marigold_flat.jpg filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_marigold.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_mesh_graphormer.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_metric3d.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_recolor.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_save_kps.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_teed.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_torchscript.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/example_unimatch.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/ExecuteAll.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/ExecuteAll1.jpg filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/examples/ExecuteAll2.jpg filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/NotoSans-Regular.ttf filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/src/custom_controlnet_aux/mesh_graphormer/hand_landmarker.task filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/src/custom_controlnet_aux/tests/test_image.png filter=lfs diff=lfs merge=lfs -text
+custom_nodes/comfyui_controlnet_aux/tests/pose.png filter=lfs diff=lfs merge=lfs -text
    	
custom_nodes/comfyui_controlnet_aux/LICENSE.txt
ADDED
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
    	
custom_nodes/comfyui_controlnet_aux/NotoSans-Regular.ttf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b04c8dd65af6b73eb4279472ed1580b29102d6496a377340e80a40cdb3b22c9
size 455188
    	
custom_nodes/comfyui_controlnet_aux/README.md
ADDED
@@ -0,0 +1,252 @@
# ComfyUI's ControlNet Auxiliary Preprocessors
Plug-and-play [ComfyUI](https://github.com/comfyanonymous/ComfyUI) node sets for making [ControlNet](https://github.com/lllyasviel/ControlNet/) hint images

"anime style, a protest in the street, cyberpunk city, a woman with pink hair and golden eyes (looking at the viewer) is holding a sign with the text "ComfyUI ControlNet Aux" in bold, neon pink" on Flux.1 Dev

The code is copy-pasted from the respective folders in https://github.com/lllyasviel/ControlNet/tree/main/annotator and connected to [the 🤗 Hub](https://huggingface.co/lllyasviel/Annotators).

All credit & copyright goes to https://github.com/lllyasviel.

# Updates
Go to the [Update page](./UPDATES.md) to follow updates.

# Installation:
## Using ComfyUI Manager (recommended):
Install [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) and follow the steps introduced there to install this repo.

## Alternative:
If you're running on Linux, or using a non-admin account on Windows, make sure `/ComfyUI/custom_nodes` and `comfyui_controlnet_aux` have write permissions.

There is now an **install.bat** you can run to install to the portable build if it is detected. Otherwise it will default to a system install and assume you followed ComfyUI's manual installation steps.

If you can't run **install.bat** (e.g. you are a Linux user), open the CMD/shell and do the following:
  - Navigate to your `/ComfyUI/custom_nodes/` folder
  - Run `git clone https://github.com/Fannovel16/comfyui_controlnet_aux/`
  - Navigate to your `comfyui_controlnet_aux` folder
    - Portable/venv:
       - Run `path/to/ComfyUI/python_embeded/python.exe -s -m pip install -r requirements.txt`
    - With system python:
       - Run `pip install -r requirements.txt`
  - Start ComfyUI

# Nodes
Please note that this repo only supports preprocessors that make hint images (e.g. stickman, canny edge, etc.).
All preprocessors except Inpaint are integrated into the `AIO Aux Preprocessor` node.
That node lets you pick a preprocessor quickly, but a preprocessor's own threshold parameters can't be set there.
You need to use the preprocessor's own node directly to set thresholds.

# Nodes (sections are categories in Comfy menu)
## Line Extractors
| Preprocessor Node           | sd-webui-controlnet/other |          ControlNet/T2I-Adapter           |
|-----------------------------|---------------------------|-------------------------------------------|
| Binary Lines                | binary                    | control_scribble                          |
| Canny Edge                  | canny                     | control_v11p_sd15_canny <br> control_canny <br> t2iadapter_canny |
| HED Soft-Edge Lines         | hed                       | control_v11p_sd15_softedge <br> control_hed |
| Standard Lineart            | standard_lineart          | control_v11p_sd15_lineart                 |
| Realistic Lineart           | lineart (or `lineart_coarse` if `coarse` is enabled) | control_v11p_sd15_lineart |
| Anime Lineart               | lineart_anime             | control_v11p_sd15s2_lineart_anime         |
| Manga Lineart               | lineart_anime_denoise     | control_v11p_sd15s2_lineart_anime         |
| M-LSD Lines                 | mlsd                      | control_v11p_sd15_mlsd <br> control_mlsd  |
| PiDiNet Soft-Edge Lines     | pidinet                   | control_v11p_sd15_softedge <br> control_scribble |
| Scribble Lines              | scribble                  | control_v11p_sd15_scribble <br> control_scribble |
| Scribble XDoG Lines         | scribble_xdog             | control_v11p_sd15_scribble <br> control_scribble |
| Fake Scribble Lines         | scribble_hed              | control_v11p_sd15_scribble <br> control_scribble |
| TEED Soft-Edge Lines        | teed                      | [controlnet-sd-xl-1.0-softedge-dexined](https://huggingface.co/SargeZT/controlnet-sd-xl-1.0-softedge-dexined/blob/main/controlnet-sd-xl-1.0-softedge-dexined.safetensors) <br> control_v11p_sd15_softedge (Theoretically) |
| Scribble PiDiNet Lines      | scribble_pidinet          | control_v11p_sd15_scribble <br> control_scribble |
| AnyLine Lineart             |                           | mistoLine_fp16.safetensors <br> mistoLine_rank256 <br> control_v11p_sd15s2_lineart_anime <br> control_v11p_sd15_lineart |

## Normal and Depth Estimators
| Preprocessor Node           | sd-webui-controlnet/other |          ControlNet/T2I-Adapter           |
|-----------------------------|---------------------------|-------------------------------------------|
| MiDaS Depth Map             | (normal) depth            | control_v11f1p_sd15_depth <br> control_depth <br> t2iadapter_depth |
| LeReS Depth Map             | depth_leres               | control_v11f1p_sd15_depth <br> control_depth <br> t2iadapter_depth |
| Zoe Depth Map               | depth_zoe                 | control_v11f1p_sd15_depth <br> control_depth <br> t2iadapter_depth |
| MiDaS Normal Map            | normal_map                | control_normal                            |
| BAE Normal Map              | normal_bae                | control_v11p_sd15_normalbae               |
| MeshGraphormer Hand Refiner ([HandRefiner](https://github.com/wenquanlu/HandRefiner)) | depth_hand_refiner | [control_sd15_inpaint_depth_hand_fp16](https://huggingface.co/hr16/ControlNet-HandRefiner-pruned/blob/main/control_sd15_inpaint_depth_hand_fp16.safetensors) |
| Depth Anything              | depth_anything            | [Depth-Anything](https://huggingface.co/spaces/LiheYoung/Depth-Anything/blob/main/checkpoints_controlnet/diffusion_pytorch_model.safetensors) |
| Zoe Depth Anything <br> (Basically Zoe but the encoder is replaced with DepthAnything) | depth_anything | [Depth-Anything](https://huggingface.co/spaces/LiheYoung/Depth-Anything/blob/main/checkpoints_controlnet/diffusion_pytorch_model.safetensors) |
| Normal DSINE                |                           | control_normal/control_v11p_sd15_normalbae |
| Metric3D Depth              |                           | control_v11f1p_sd15_depth <br> control_depth <br> t2iadapter_depth |
| Metric3D Normal             |                           | control_v11p_sd15_normalbae |
| Depth Anything V2           |                           | [Depth-Anything](https://huggingface.co/spaces/LiheYoung/Depth-Anything/blob/main/checkpoints_controlnet/diffusion_pytorch_model.safetensors) |

## Faces and Poses Estimators
| Preprocessor Node           | sd-webui-controlnet/other |          ControlNet/T2I-Adapter           |
|-----------------------------|---------------------------|-------------------------------------------|
| DWPose Estimator            | dw_openpose_full          | control_v11p_sd15_openpose <br> control_openpose <br> t2iadapter_openpose |
| OpenPose Estimator          | openpose (detect_body) <br> openpose_hand (detect_body + detect_hand) <br> openpose_faceonly (detect_face) <br> openpose_full (detect_hand + detect_body + detect_face) | control_v11p_sd15_openpose <br> control_openpose <br> t2iadapter_openpose |
| MediaPipe Face Mesh         | mediapipe_face            | controlnet_sd21_laion_face_v2             |
| Animal Estimator            | animal_openpose           | [control_sd15_animal_openpose_fp16](https://huggingface.co/huchenlei/animal_openpose/blob/main/control_sd15_animal_openpose_fp16.pth) |

## Optical Flow Estimators
| Preprocessor Node           | sd-webui-controlnet/other |          ControlNet/T2I-Adapter           |
|-----------------------------|---------------------------|-------------------------------------------|
| Unimatch Optical Flow       |                           | [DragNUWA](https://github.com/ProjectNUWA/DragNUWA) |

### How to get OpenPose-format JSON?
#### User-side
This workflow will save images to ComfyUI's output folder (the same location as output images). If you can't find the `Save Pose Keypoints` node, update this extension.

#### Dev-side
An array of [OpenPose-format JSON](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/02_output.md#json-output-format) corresponding to each frame in an IMAGE batch can be obtained from DWPose and OpenPose using `app.nodeOutputs` on the UI or the `/history` API endpoint. JSON output from AnimalPose uses a format similar to OpenPose JSON:
```
[
    {
        "version": "ap10k",
        "animals": [
            [[x1, y1, 1], [x2, y2, 1],..., [x17, y17, 1]],
            [[x1, y1, 1], [x2, y2, 1],..., [x17, y17, 1]],
            ...
        ],
        "canvas_height": 512,
        "canvas_width": 768
    },
    ...
]
```

For extension developers (e.g. Openpose editor):
```js
const poseNodes = app.graph._nodes.filter(node => ["OpenposePreprocessor", "DWPreprocessor", "AnimalPosePreprocessor"].includes(node.type))
for (const poseNode of poseNodes) {
    const openposeResults = JSON.parse(app.nodeOutputs[poseNode.id].openpose_json[0])
    console.log(openposeResults) // An array containing OpenPose JSON for each frame
}
```

For API users:
JavaScript
```js
import fetch from "node-fetch" // Remember to add "type": "module" to "package.json"
async function main() {
    const promptId = '792c1905-ecfe-41f4-8114-83e6a4a09a9f' // Too lazy to POST /queue
    let history = await fetch(`http://127.0.0.1:8188/history/${promptId}`).then(re => re.json())
    history = history[promptId]
    const nodeOutputs = Object.values(history.outputs).filter(output => output.openpose_json)
    for (const nodeOutput of nodeOutputs) {
        const openposeResults = JSON.parse(nodeOutput.openpose_json[0])
        console.log(openposeResults) // An array containing OpenPose JSON for each frame
    }
}
main()
```

Python
```py
import json, urllib.request

server_address = "127.0.0.1:8188"
prompt_id = '' # Too lazy to POST /queue

def get_history(prompt_id):
    with urllib.request.urlopen("http://{}/history/{}".format(server_address, prompt_id)) as response:
        return json.loads(response.read())

history = get_history(prompt_id)[prompt_id]
for node_id in history['outputs']:
    node_output = history['outputs'][node_id]
    if 'openpose_json' in node_output:
        print(json.loads(node_output['openpose_json'][0])) # A list containing OpenPose JSON for each frame
```

## Semantic Segmentation
| Preprocessor Node           | sd-webui-controlnet/other |          ControlNet/T2I-Adapter           |
|-----------------------------|---------------------------|-------------------------------------------|
| OneFormer ADE20K Segmentor  | oneformer_ade20k          | control_v11p_sd15_seg                     |
| OneFormer COCO Segmentor    | oneformer_coco            | control_v11p_sd15_seg                     |
| UniFormer Segmentor         | segmentation              | control_sd15_seg <br> control_v11p_sd15_seg |

## T2IAdapter-only
| Preprocessor Node           | sd-webui-controlnet/other |          ControlNet/T2I-Adapter           |
|-----------------------------|---------------------------|-------------------------------------------|
| Color Pallete               | color                     | t2iadapter_color                          |
| Content Shuffle             | shuffle                   | t2iadapter_style                          |

## Recolor
| Preprocessor Node           | sd-webui-controlnet/other |          ControlNet/T2I-Adapter           |
|-----------------------------|---------------------------|-------------------------------------------|
| Image Luminance             | recolor_luminance         | [ioclab_sd15_recolor](https://huggingface.co/lllyasviel/sd_control_collection/resolve/main/ioclab_sd15_recolor.safetensors) <br> [sai_xl_recolor_256lora](https://huggingface.co/lllyasviel/sd_control_collection/resolve/main/sai_xl_recolor_256lora.safetensors) <br> [bdsqlsz_controlllite_xl_recolor_luminance](https://huggingface.co/bdsqlsz/qinglong_controlnet-lllite/resolve/main/bdsqlsz_controlllite_xl_recolor_luminance.safetensors) |
| Image Intensity             | recolor_intensity         | Idk. Maybe same as above? |

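For intuition, a luminance hint like the `Image Luminance` row above boils down to collapsing the image to a single gray channel. A minimal sketch using the standard Rec.601 luma weights (illustrative only, not necessarily this repo's exact math; `input.png` is a placeholder path):
```py
import numpy as np
from PIL import Image

# Load an RGB image and normalize to [0, 1]
img = np.asarray(Image.open("input.png").convert("RGB"), dtype=np.float32) / 255.0

# Rec.601 luma weights: Y = 0.299 R + 0.587 G + 0.114 B
luminance = 0.299 * img[..., 0] + 0.587 * img[..., 1] + 0.114 * img[..., 2]

# Save the grayscale map as a hint image
Image.fromarray((luminance * 255).astype(np.uint8)).save("recolor_hint.png")
```
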
# Examples
> A picture is worth a thousand words

# Testing workflow
https://github.com/Fannovel16/comfyui_controlnet_aux/blob/main/examples/ExecuteAll.png
Input image: https://github.com/Fannovel16/comfyui_controlnet_aux/blob/main/examples/comfyui-controlnet-aux-logo.png

# Q&A:
## Why do some nodes not appear after I installed this repo?

This repo has a new mechanism that skips any custom node that can't be imported. If you hit this case, please create an issue on the [Issues tab](https://github.com/Fannovel16/comfyui_controlnet_aux/issues) with the log from the command line.

## DWPose/AnimalPose only uses CPU, so it's slow. How can I make it use the GPU?
There are two ways to speed up DWPose: TorchScript checkpoints (.torchscript.pt) or ONNXRuntime (.onnx). The TorchScript way is a little slower than ONNXRuntime but doesn't require any additional library, and it is still far faster than CPU.

A TorchScript bbox detector is compatible with an ONNX pose estimator and vice versa.
### TorchScript
Set `bbox_detector` and `pose_estimator` according to this picture. You can try other bbox detectors ending with `.torchscript.pt` to reduce bbox detection time if your input images are ideal.

### ONNXRuntime
If onnxruntime is installed successfully and the checkpoint used ends with `.onnx`, it will replace the default cv2 backend to take advantage of the GPU. Note that if you are using an NVIDIA card, this method currently only works on CUDA 11.8 (ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z) unless you compile onnxruntime yourself.

1. Know your onnxruntime build (you can verify it with the sketch after these steps):
   * NVIDIA CUDA 11.x or below/AMD GPU: `onnxruntime-gpu`
   * NVIDIA CUDA 12.x: `onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/`
   * DirectML: `onnxruntime-directml`
   * OpenVINO: `onnxruntime-openvino`

   Note that if this is your first time using ComfyUI, please test whether it can run on your device before doing the next steps.

2. Add it into `requirements.txt`

3. Run `install.bat` or the pip command mentioned in Installation

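A quick way to check which execution providers your installed onnxruntime actually exposes, using onnxruntime's public `get_available_providers()` (a minimal sketch; run it in the same Python environment ComfyUI uses):
```py
import onnxruntime as ort

providers = ort.get_available_providers()
print(providers)  # e.g. ['CUDAExecutionProvider', 'CPUExecutionProvider']

# If only CPUExecutionProvider shows up, the GPU build is missing or mismatched
# with your CUDA version, and DWPose will stay on the CPU path.
if not any(p in providers for p in ("CUDAExecutionProvider", "DmlExecutionProvider")):
    print("No GPU execution provider found; reinstall the matching onnxruntime build.")
```
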
# Asset files of preprocessors
* anime_face_segment: [bdsqlsz/qinglong_controlnet-lllite/Annotators/UNet.pth](https://huggingface.co/bdsqlsz/qinglong_controlnet-lllite/blob/main/Annotators/UNet.pth), [anime-seg/isnetis.ckpt](https://huggingface.co/skytnt/anime-seg/blob/main/isnetis.ckpt)
* densepose: [LayerNorm/DensePose-TorchScript-with-hint-image/densepose_r50_fpn_dl.torchscript](https://huggingface.co/LayerNorm/DensePose-TorchScript-with-hint-image/blob/main/densepose_r50_fpn_dl.torchscript)
* dwpose:
  * bbox_detector: Either [yzd-v/DWPose/yolox_l.onnx](https://huggingface.co/yzd-v/DWPose/blob/main/yolox_l.onnx), [hr16/yolox-onnx/yolox_l.torchscript.pt](https://huggingface.co/hr16/yolox-onnx/blob/main/yolox_l.torchscript.pt), [hr16/yolo-nas-fp16/yolo_nas_l_fp16.onnx](https://huggingface.co/hr16/yolo-nas-fp16/blob/main/yolo_nas_l_fp16.onnx), [hr16/yolo-nas-fp16/yolo_nas_m_fp16.onnx](https://huggingface.co/hr16/yolo-nas-fp16/blob/main/yolo_nas_m_fp16.onnx), [hr16/yolo-nas-fp16/yolo_nas_s_fp16.onnx](https://huggingface.co/hr16/yolo-nas-fp16/blob/main/yolo_nas_s_fp16.onnx)
  * pose_estimator: Either [hr16/DWPose-TorchScript-BatchSize5/dw-ll_ucoco_384_bs5.torchscript.pt](https://huggingface.co/hr16/DWPose-TorchScript-BatchSize5/blob/main/dw-ll_ucoco_384_bs5.torchscript.pt), [yzd-v/DWPose/dw-ll_ucoco_384.onnx](https://huggingface.co/yzd-v/DWPose/blob/main/dw-ll_ucoco_384.onnx)
* animal_pose (ap10k):
  * bbox_detector: Either [yzd-v/DWPose/yolox_l.onnx](https://huggingface.co/yzd-v/DWPose/blob/main/yolox_l.onnx), [hr16/yolox-onnx/yolox_l.torchscript.pt](https://huggingface.co/hr16/yolox-onnx/blob/main/yolox_l.torchscript.pt), [hr16/yolo-nas-fp16/yolo_nas_l_fp16.onnx](https://huggingface.co/hr16/yolo-nas-fp16/blob/main/yolo_nas_l_fp16.onnx), [hr16/yolo-nas-fp16/yolo_nas_m_fp16.onnx](https://huggingface.co/hr16/yolo-nas-fp16/blob/main/yolo_nas_m_fp16.onnx), [hr16/yolo-nas-fp16/yolo_nas_s_fp16.onnx](https://huggingface.co/hr16/yolo-nas-fp16/blob/main/yolo_nas_s_fp16.onnx)
  * pose_estimator: Either [hr16/DWPose-TorchScript-BatchSize5/rtmpose-m_ap10k_256_bs5.torchscript.pt](https://huggingface.co/hr16/DWPose-TorchScript-BatchSize5/blob/main/rtmpose-m_ap10k_256_bs5.torchscript.pt), [hr16/UnJIT-DWPose/rtmpose-m_ap10k_256.onnx](https://huggingface.co/hr16/UnJIT-DWPose/blob/main/rtmpose-m_ap10k_256.onnx)
* hed: [lllyasviel/Annotators/ControlNetHED.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/ControlNetHED.pth)
* leres: [lllyasviel/Annotators/res101.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/res101.pth), [lllyasviel/Annotators/latest_net_G.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/latest_net_G.pth)
* lineart: [lllyasviel/Annotators/sk_model.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/sk_model.pth), [lllyasviel/Annotators/sk_model2.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/sk_model2.pth)
* lineart_anime: [lllyasviel/Annotators/netG.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/netG.pth)
* manga_line: [lllyasviel/Annotators/erika.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/erika.pth)
* mesh_graphormer: [hr16/ControlNet-HandRefiner-pruned/graphormer_hand_state_dict.bin](https://huggingface.co/hr16/ControlNet-HandRefiner-pruned/blob/main/graphormer_hand_state_dict.bin), [hr16/ControlNet-HandRefiner-pruned/hrnetv2_w64_imagenet_pretrained.pth](https://huggingface.co/hr16/ControlNet-HandRefiner-pruned/blob/main/hrnetv2_w64_imagenet_pretrained.pth)
         | 
| 229 | 
            +
            * midas:  [lllyasviel/Annotators/dpt_hybrid-midas-501f0c75.pt](https://huggingface.co/lllyasviel/Annotators/blob/main/dpt_hybrid-midas-501f0c75.pt)
         | 
| 230 | 
            +
            * mlsd:  [lllyasviel/Annotators/mlsd_large_512_fp32.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/mlsd_large_512_fp32.pth)
         | 
| 231 | 
            +
            * normalbae:  [lllyasviel/Annotators/scannet.pt](https://huggingface.co/lllyasviel/Annotators/blob/main/scannet.pt)
         | 
| 232 | 
            +
            * oneformer:  [lllyasviel/Annotators/250_16_swin_l_oneformer_ade20k_160k.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/250_16_swin_l_oneformer_ade20k_160k.pth)
         | 
| 233 | 
            +
            * open_pose:  [lllyasviel/Annotators/body_pose_model.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/body_pose_model.pth), [lllyasviel/Annotators/hand_pose_model.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/hand_pose_model.pth), [lllyasviel/Annotators/facenet.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/facenet.pth)
         | 
| 234 | 
            +
            * pidi:  [lllyasviel/Annotators/table5_pidinet.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/table5_pidinet.pth)
         | 
| 235 | 
            +
            * sam:  [dhkim2810/MobileSAM/mobile_sam.pt](https://huggingface.co/dhkim2810/MobileSAM/blob/main/mobile_sam.pt)
         | 
| 236 | 
            +
            * uniformer:  [lllyasviel/Annotators/upernet_global_small.pth](https://huggingface.co/lllyasviel/Annotators/blob/main/upernet_global_small.pth)
         | 
| 237 | 
            +
            * zoe:  [lllyasviel/Annotators/ZoeD_M12_N.pt](https://huggingface.co/lllyasviel/Annotators/blob/main/ZoeD_M12_N.pt)
         | 
| 238 | 
            +
            * teed:  [bdsqlsz/qinglong_controlnet-lllite/7_model.pth](https://huggingface.co/bdsqlsz/qinglong_controlnet-lllite/blob/main/Annotators/7_model.pth)
         | 
| 239 | 
            +
            * depth_anything: Either [LiheYoung/Depth-Anything/checkpoints/depth_anything_vitl14.pth](https://huggingface.co/spaces/LiheYoung/Depth-Anything/blob/main/checkpoints/depth_anything_vitl14.pth), [LiheYoung/Depth-Anything/checkpoints/depth_anything_vitb14.pth](https://huggingface.co/spaces/LiheYoung/Depth-Anything/blob/main/checkpoints/depth_anything_vitb14.pth) or [LiheYoung/Depth-Anything/checkpoints/depth_anything_vits14.pth](https://huggingface.co/spaces/LiheYoung/Depth-Anything/blob/main/checkpoints/depth_anything_vits14.pth)
         | 
| 240 | 
            +
            * diffusion_edge: Either [hr16/Diffusion-Edge/diffusion_edge_indoor.pt](https://huggingface.co/hr16/Diffusion-Edge/blob/main/diffusion_edge_indoor.pt), [hr16/Diffusion-Edge/diffusion_edge_urban.pt](https://huggingface.co/hr16/Diffusion-Edge/blob/main/diffusion_edge_urban.pt) or [hr16/Diffusion-Edge/diffusion_edge_natrual.pt](https://huggingface.co/hr16/Diffusion-Edge/blob/main/diffusion_edge_natrual.pt)
         | 
| 241 | 
            +
            * unimatch: Either [hr16/Unimatch/gmflow-scale2-regrefine6-mixdata.pth](https://huggingface.co/hr16/Unimatch/blob/main/gmflow-scale2-regrefine6-mixdata.pth), [hr16/Unimatch/gmflow-scale2-mixdata.pth](https://huggingface.co/hr16/Unimatch/blob/main/gmflow-scale2-mixdata.pth) or [hr16/Unimatch/gmflow-scale1-mixdata.pth](https://huggingface.co/hr16/Unimatch/blob/main/gmflow-scale1-mixdata.pth)
         | 
| 242 | 
            +
            * zoe_depth_anything: Either [LiheYoung/Depth-Anything/checkpoints_metric_depth/depth_anything_metric_depth_indoor.pt](https://huggingface.co/spaces/LiheYoung/Depth-Anything/blob/main/checkpoints_metric_depth/depth_anything_metric_depth_indoor.pt) or [LiheYoung/Depth-Anything/checkpoints_metric_depth/depth_anything_metric_depth_outdoor.pt](https://huggingface.co/spaces/LiheYoung/Depth-Anything/blob/main/checkpoints_metric_depth/depth_anything_metric_depth_outdoor.pt)
         | 
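If you'd rather fetch one of these checkpoints manually, here is a minimal sketch using `huggingface_hub` (repo id and filename are taken from the `hed` entry above; the target folder is an assumption, match it to your `ckpts` layout):

```python
from huggingface_hub import hf_hub_download

# Downloads ControlNetHED.pth from lllyasviel/Annotators into ./ckpts (assumed target).
path = hf_hub_download(
    repo_id="lllyasviel/Annotators",
    filename="ControlNetHED.pth",
    local_dir="./ckpts",
)
print(path)
```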
| 243 | 
            +
            # 2000 Stars 😄
         | 
| 244 | 
            +
            <a href="https://star-history.com/#Fannovel16/comfyui_controlnet_aux&Date">
         | 
| 245 | 
            +
              <picture>
         | 
| 246 | 
            +
                <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=Fannovel16/comfyui_controlnet_aux&type=Date&theme=dark" />
         | 
| 247 | 
            +
                <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=Fannovel16/comfyui_controlnet_aux&type=Date" />
         | 
| 248 | 
            +
                <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=Fannovel16/comfyui_controlnet_aux&type=Date" />
         | 
| 249 | 
            +
              </picture>
         | 
| 250 | 
            +
            </a>
         | 
| 251 | 
            +
             | 
| 252 | 
            +
Thanks for y'all's support. I never thought the graph for stars would be linear lol.
         | 
    	
        custom_nodes/comfyui_controlnet_aux/UPDATES.md
    ADDED
    
    | @@ -0,0 +1,44 @@ | |
| 1 | 
            +
* `AIO Aux Preprocessor` integrating all loadable aux preprocessors as dropdown options. Easy to copy, paste, and switch preprocessors quickly.
         | 
| 2 | 
            +
* Added OpenPose-format JSON output from the OpenPose Preprocessor and DWPose Preprocessor. Check [here](#faces-and-poses).
         | 
| 3 | 
            +
            * Fixed wrong model path when downloading DWPose.
         | 
| 4 | 
            +
            * Make hint images less blurry.
         | 
| 5 | 
            +
            * Added `resolution` option, `PixelPerfectResolution` and `HintImageEnchance` nodes (TODO: Documentation).
         | 
| 6 | 
            +
            * Added `RAFT Optical Flow Embedder` for TemporalNet2 (TODO: Workflow example).
         | 
| 7 | 
            +
            * Fixed opencv's conflicts between this extension, [ReActor](https://github.com/Gourieff/comfyui-reactor-node) and Roop. Thanks `Gourieff` for [the solution](https://github.com/Fannovel16/comfyui_controlnet_aux/issues/7#issuecomment-1734319075)!
         | 
| 8 | 
            +
* RAFT is removed as the code behind it doesn't match what the original code does
         | 
| 9 | 
            +
            * Changed `lineart`'s display name from `Normal Lineart` to `Realistic Lineart`. This change won't affect old workflows
         | 
| 10 | 
            +
* Added support for `onnxruntime` to speed up DWPose (see the Q&A)
         | 
| 11 | 
            +
* Fixed TypeError: expected size to be one of int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], but got size with types [<class 'numpy.int64'>, <class 'numpy.int64'>]: [Issue](https://github.com/Fannovel16/comfyui_controlnet_aux/issues/2), [PR](https://github.com/Fannovel16/comfyui_controlnet_aux/pull/71)
         | 
| 12 | 
            +
            * Fixed ImageGenResolutionFromImage mishape (https://github.com/Fannovel16/comfyui_controlnet_aux/pull/74)
         | 
| 13 | 
            +
* Fixed LeRes and MiDaS's incompatibility with the MPS device
         | 
| 14 | 
            +
* Fixed checking the DWPose onnxruntime session multiple times: https://github.com/Fannovel16/comfyui_controlnet_aux/issues/89
         | 
| 15 | 
            +
* Added `Anime Face Segmentor` (in `ControlNet Preprocessors/Semantic Segmentation`) for [ControlNet AnimeFaceSegmentV2](https://huggingface.co/bdsqlsz/qinglong_controlnet-lllite#animefacesegmentv2). Check [here](#anime-face-segmentor)
         | 
| 16 | 
            +
* Changed download functions and fixed a [download error](https://github.com/Fannovel16/comfyui_controlnet_aux/issues/39): [PR](https://github.com/Fannovel16/comfyui_controlnet_aux/pull/96)
         | 
| 17 | 
            +
* Cache the DWPose onnxruntime session during the first use of the DWPose node instead of at ComfyUI startup
         | 
| 18 | 
            +
            * Added alternative YOLOX models for faster speed when using DWPose
         | 
| 19 | 
            +
            * Added alternative DWPose models
         | 
| 20 | 
            +
            * Implemented the preprocessor for [AnimalPose ControlNet](https://github.com/abehonest/ControlNet_AnimalPose/tree/main). Check [Animal Pose AP-10K](#animal-pose-ap-10k) 
         | 
| 21 | 
            +
* Added YOLO-NAS models which are drop-in replacements for YOLOX
         | 
| 22 | 
            +
* Fixed OpenPose faces/hands no longer being detected: https://github.com/Fannovel16/comfyui_controlnet_aux/issues/54
         | 
| 23 | 
            +
            * Added TorchScript implementation of DWPose and AnimalPose
         | 
| 24 | 
            +
* Added a TorchScript implementation of DensePose from a [Colab notebook](https://colab.research.google.com/drive/16hcaaKs210ivpxjoyGNuvEXZD4eqOOSQ) which doesn't require detectron2. [Example](#densepose). Thanks [@LayerNome](https://github.com/Layer-norm) for fixing related bugs.
         | 
| 25 | 
            +
            * Added Standard Lineart Preprocessor
         | 
| 26 | 
            +
            * Fixed OpenPose misplacements in some cases 
         | 
| 27 | 
            +
            * Added Mesh Graphormer - Hand Depth Map & Mask
         | 
| 28 | 
            +
* Fixed the misaligned-hands bug in MeshGraphormer
         | 
| 29 | 
            +
            * Added more mask options for MeshGraphormer
         | 
| 30 | 
            +
            * Added Save Pose Keypoint node for editing
         | 
| 31 | 
            +
            * Added Unimatch Optical Flow
         | 
| 32 | 
            +
            * Added Depth Anything & Zoe Depth Anything
         | 
| 33 | 
            +
* Removed the resolution field from Unimatch Optical Flow as interpolating optical flow seems unstable
         | 
| 34 | 
            +
            * Added TEED Soft-Edge Preprocessor
         | 
| 35 | 
            +
            * Added DiffusionEdge
         | 
| 36 | 
            +
            * Added Image Luminance and Image Intensity
         | 
| 37 | 
            +
            * Added Normal DSINE
         | 
| 38 | 
            +
            * Added TTPlanet Tile (09/05/2024, DD/MM/YYYY)
         | 
| 39 | 
            +
            * Added AnyLine, Metric3D (18/05/2024)
         | 
| 40 | 
            +
            * Added Depth Anything V2 (16/06/2024)
         | 
| 41 | 
            +
            * Added Union model of ControlNet and preprocessors
         | 
| 42 | 
            +
            
         | 
| 43 | 
            +
* Refactored INPUT_TYPES and added an Execute All node while learning [Execution Model Inversion](https://github.com/comfyanonymous/ComfyUI/pull/2666)
         | 
| 44 | 
            +
            * Added scale_stick_for_xinsr_cn (https://github.com/Fannovel16/comfyui_controlnet_aux/issues/447) (09/04/2024)
         | 
    	
        custom_nodes/comfyui_controlnet_aux/__init__.py
    ADDED
    
    | @@ -0,0 +1,214 @@ | |
| 1 | 
            +
            import sys, os
         | 
| 2 | 
            +
            from .utils import here, define_preprocessor_inputs, INPUT
         | 
| 3 | 
            +
            from pathlib import Path
         | 
| 4 | 
            +
            import traceback
         | 
| 5 | 
            +
            import importlib
         | 
| 6 | 
            +
            from .log import log, blue_text, cyan_text, get_summary, get_label
         | 
| 7 | 
            +
            from .hint_image_enchance import NODE_CLASS_MAPPINGS as HIE_NODE_CLASS_MAPPINGS
         | 
| 8 | 
            +
            from .hint_image_enchance import NODE_DISPLAY_NAME_MAPPINGS as HIE_NODE_DISPLAY_NAME_MAPPINGS
         | 
| 9 | 
            +
            #Ref: https://github.com/comfyanonymous/ComfyUI/blob/76d53c4622fc06372975ed2a43ad345935b8a551/nodes.py#L17
         | 
| 10 | 
            +
            sys.path.insert(0, str(Path(here, "src").resolve()))
         | 
| 11 | 
            +
            for pkg_name in ["custom_controlnet_aux", "custom_mmpkg"]:
         | 
| 12 | 
            +
                sys.path.append(str(Path(here, "src", pkg_name).resolve()))
         | 
| 13 | 
            +
             | 
| 14 | 
            +
            #Enable CPU fallback for ops not being supported by MPS like upsample_bicubic2d.out
         | 
| 15 | 
            +
            #https://github.com/pytorch/pytorch/issues/77764
         | 
| 16 | 
            +
            #https://github.com/Fannovel16/comfyui_controlnet_aux/issues/2#issuecomment-1763579485
         | 
| 17 | 
            +
            os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = os.getenv("PYTORCH_ENABLE_MPS_FALLBACK", '1')
         | 
| 18 | 
            +
             | 
| 19 | 
            +
             | 
| 20 | 
            +
            def load_nodes():
         | 
| 21 | 
            +
                shorted_errors = []
         | 
| 22 | 
            +
                full_error_messages = []
         | 
| 23 | 
            +
                node_class_mappings = {}
         | 
| 24 | 
            +
                node_display_name_mappings = {}
         | 
| 25 | 
            +
             | 
| 26 | 
            +
                for filename in (here / "node_wrappers").iterdir():
         | 
| 27 | 
            +
                    module_name = filename.stem
         | 
| 28 | 
            +
                    if module_name.startswith('.'): continue #Skip hidden files created by the OS (e.g. [.DS_Store](https://en.wikipedia.org/wiki/.DS_Store))
         | 
| 29 | 
            +
                    try:
         | 
| 30 | 
            +
                        module = importlib.import_module(
         | 
| 31 | 
            +
                            f".node_wrappers.{module_name}", package=__package__
         | 
| 32 | 
            +
                        )
         | 
| 33 | 
            +
                        node_class_mappings.update(getattr(module, "NODE_CLASS_MAPPINGS"))
         | 
| 34 | 
            +
                        if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS"):
         | 
| 35 | 
            +
                            node_display_name_mappings.update(getattr(module, "NODE_DISPLAY_NAME_MAPPINGS"))
         | 
| 36 | 
            +
             | 
| 37 | 
            +
                        log.debug(f"Imported {module_name} nodes")
         | 
| 38 | 
            +
             | 
| 39 | 
            +
                    except AttributeError:
         | 
| 40 | 
            +
                        pass  # wip nodes
         | 
| 41 | 
            +
                    except Exception:
         | 
| 42 | 
            +
                        error_message = traceback.format_exc()
         | 
| 43 | 
            +
                        full_error_messages.append(error_message)
         | 
| 44 | 
            +
                        error_message = error_message.splitlines()[-1]
         | 
| 45 | 
            +
                        shorted_errors.append(
         | 
| 46 | 
            +
                            f"Failed to import module {module_name} because {error_message}"
         | 
| 47 | 
            +
                        )
         | 
| 48 | 
            +
                
         | 
| 49 | 
            +
                if len(shorted_errors) > 0:
         | 
| 50 | 
            +
                    full_err_log = '\n\n'.join(full_error_messages)
         | 
| 51 | 
            +
                    print(f"\n\nFull error log from comfyui_controlnet_aux: \n{full_err_log}\n\n")
         | 
| 52 | 
            +
                    log.info(
         | 
| 53 | 
            +
                        f"Some nodes failed to load:\n\t"
         | 
| 54 | 
            +
                        + "\n\t".join(shorted_errors)
         | 
| 55 | 
            +
                        + "\n\n"
         | 
| 56 | 
            +
                        + "Check that you properly installed the dependencies.\n"
         | 
| 57 | 
            +
                        + "If you think this is a bug, please report it on the github page (https://github.com/Fannovel16/comfyui_controlnet_aux/issues)"
         | 
| 58 | 
            +
                    )
         | 
| 59 | 
            +
                return node_class_mappings, node_display_name_mappings
         | 
| 60 | 
            +
             | 
| 61 | 
            +
            AUX_NODE_MAPPINGS, AUX_DISPLAY_NAME_MAPPINGS = load_nodes()
         | 
| 62 | 
            +
             | 
| 63 | 
            +
#For nodes that don't map image to image or that have special requirements
         | 
| 64 | 
            +
            AIO_NOT_SUPPORTED = ["InpaintPreprocessor", "MeshGraphormer+ImpactDetector-DepthMapPreprocessor", "DiffusionEdge_Preprocessor"]
         | 
| 65 | 
            +
            AIO_NOT_SUPPORTED += ["SavePoseKpsAsJsonFile", "FacialPartColoringFromPoseKps", "UpperBodyTrackingFromPoseKps", "RenderPeopleKps", "RenderAnimalKps"]
         | 
| 66 | 
            +
            AIO_NOT_SUPPORTED += ["Unimatch_OptFlowPreprocessor", "MaskOptFlow"]
         | 
| 67 | 
            +
             | 
| 68 | 
            +
            def preprocessor_options():
         | 
| 69 | 
            +
                auxs = list(AUX_NODE_MAPPINGS.keys())
         | 
| 70 | 
            +
                auxs.insert(0, "none")
         | 
| 71 | 
            +
                for name in AIO_NOT_SUPPORTED:
         | 
| 72 | 
            +
                    if name in auxs:
         | 
| 73 | 
            +
                        auxs.remove(name)
         | 
| 74 | 
            +
                return auxs
         | 
| 75 | 
            +
             | 
| 76 | 
            +
             | 
| 77 | 
            +
            PREPROCESSOR_OPTIONS = preprocessor_options()
         | 
| 78 | 
            +
             | 
| 79 | 
            +
            class AIO_Preprocessor:
         | 
| 80 | 
            +
                @classmethod
         | 
| 81 | 
            +
                def INPUT_TYPES(s):
         | 
| 82 | 
            +
                    return define_preprocessor_inputs(
         | 
| 83 | 
            +
                        preprocessor=INPUT.COMBO(PREPROCESSOR_OPTIONS, default="none"),
         | 
| 84 | 
            +
                        resolution=INPUT.RESOLUTION()
         | 
| 85 | 
            +
                    )
         | 
| 86 | 
            +
             | 
| 87 | 
            +
                RETURN_TYPES = ("IMAGE",)
         | 
| 88 | 
            +
                FUNCTION = "execute"
         | 
| 89 | 
            +
             | 
| 90 | 
            +
                CATEGORY = "ControlNet Preprocessors"
         | 
| 91 | 
            +
             | 
| 92 | 
            +
                def execute(self, preprocessor, image, resolution=512):
         | 
| 93 | 
            +
                    if preprocessor == "none":
         | 
| 94 | 
            +
                        return (image, )
         | 
| 95 | 
            +
                    else:
         | 
| 96 | 
            +
                        aux_class = AUX_NODE_MAPPINGS[preprocessor]
         | 
| 97 | 
            +
                        input_types = aux_class.INPUT_TYPES()
         | 
| 98 | 
            +
                        input_types = {
         | 
| 99 | 
            +
                            **input_types["required"],
         | 
| 100 | 
            +
                            **(input_types["optional"] if "optional" in input_types else {})
         | 
| 101 | 
            +
                        }
         | 
| 102 | 
            +
                        params = {}
         | 
| 103 | 
            +
                        for name, input_type in input_types.items():
         | 
| 104 | 
            +
                            if name == "image":
         | 
| 105 | 
            +
                                params[name] = image
         | 
| 106 | 
            +
                                continue
         | 
| 107 | 
            +
             | 
| 108 | 
            +
                            if name == "resolution":
         | 
| 109 | 
            +
                                params[name] = resolution
         | 
| 110 | 
            +
                                continue
         | 
| 111 | 
            +
             | 
| 112 | 
            +
                            if len(input_type) == 2 and ("default" in input_type[1]):
         | 
| 113 | 
            +
                                params[name] = input_type[1]["default"]
         | 
| 114 | 
            +
                                continue
         | 
| 115 | 
            +
             | 
| 116 | 
            +
                            default_values = { "INT": 0, "FLOAT": 0.0 }
         | 
| 117 | 
            +
                            if input_type[0] in default_values:
         | 
| 118 | 
            +
                                params[name] = default_values[input_type[0]]
         | 
| 119 | 
            +
             | 
| 120 | 
            +
                        return getattr(aux_class(), aux_class.FUNCTION)(**params)
         | 
| 121 | 
            +
             | 
| 122 | 
            +
            class ControlNetAuxSimpleAddText:
         | 
| 123 | 
            +
                @classmethod
         | 
| 124 | 
            +
                def INPUT_TYPES(s):
         | 
| 125 | 
            +
                    return dict(
         | 
| 126 | 
            +
                        required=dict(image=INPUT.IMAGE(), text=INPUT.STRING())
         | 
| 127 | 
            +
                    )
         | 
| 128 | 
            +
                
         | 
| 129 | 
            +
                RETURN_TYPES = ("IMAGE",)
         | 
| 130 | 
            +
                FUNCTION = "execute"
         | 
| 131 | 
            +
                CATEGORY = "ControlNet Preprocessors"
         | 
| 132 | 
            +
                def execute(self, image, text):
         | 
| 133 | 
            +
                    from PIL import Image, ImageDraw, ImageFont
         | 
| 134 | 
            +
                    import numpy as np
         | 
| 135 | 
            +
                    import torch
         | 
| 136 | 
            +
             | 
| 137 | 
            +
                    font = ImageFont.truetype(str((here / "NotoSans-Regular.ttf").resolve()), 40)
         | 
| 138 | 
            +
        img = Image.fromarray((image[0].cpu().numpy() * 255.).astype(np.uint8))
         | 
| 139 | 
            +
                    ImageDraw.Draw(img).text((0,0), text, fill=(0,255,0), font=font)
         | 
| 140 | 
            +
                    return (torch.from_numpy(np.array(img)).unsqueeze(0) / 255.,)
         | 
| 141 | 
            +
             | 
| 142 | 
            +
            class ExecuteAllControlNetPreprocessors:
         | 
| 143 | 
            +
                @classmethod
         | 
| 144 | 
            +
                def INPUT_TYPES(s):
         | 
| 145 | 
            +
                    return define_preprocessor_inputs(resolution=INPUT.RESOLUTION())
         | 
| 146 | 
            +
                RETURN_TYPES = ("IMAGE",)
         | 
| 147 | 
            +
                FUNCTION = "execute"
         | 
| 148 | 
            +
             | 
| 149 | 
            +
                CATEGORY = "ControlNet Preprocessors"
         | 
| 150 | 
            +
             | 
| 151 | 
            +
                def execute(self, image, resolution=512):
         | 
| 152 | 
            +
                    try:
         | 
| 153 | 
            +
                        from comfy_execution.graph_utils import GraphBuilder
         | 
| 154 | 
            +
        except ImportError:
         | 
| 155 | 
            +
            raise RuntimeError("ExecuteAllControlNetPreprocessors requires [Execution Model Inversion](https://github.com/comfyanonymous/ComfyUI/commit/5cfe38). Update ComfyUI/SwarmUI to get this feature")
         | 
| 156 | 
            +
                    
         | 
| 157 | 
            +
                    graph = GraphBuilder()
         | 
| 158 | 
            +
                    curr_outputs = []
         | 
| 159 | 
            +
                    for preprocc in PREPROCESSOR_OPTIONS:
         | 
| 160 | 
            +
                        preprocc_node = graph.node("AIO_Preprocessor", preprocessor=preprocc, image=image, resolution=resolution)
         | 
| 161 | 
            +
                        hint_img = preprocc_node.out(0)
         | 
| 162 | 
            +
                        add_text_node = graph.node("ControlNetAuxSimpleAddText", image=hint_img, text=preprocc)
         | 
| 163 | 
            +
                        curr_outputs.append(add_text_node.out(0))
         | 
| 164 | 
            +
                    
         | 
| 165 | 
            +
                    while len(curr_outputs) > 1:
         | 
| 166 | 
            +
                        _outputs = []
         | 
| 167 | 
            +
                        for i in range(0, len(curr_outputs), 2):
         | 
| 168 | 
            +
                            if i+1 < len(curr_outputs):
         | 
| 169 | 
            +
                                image_batch = graph.node("ImageBatch", image1=curr_outputs[i], image2=curr_outputs[i+1])
         | 
| 170 | 
            +
                                _outputs.append(image_batch.out(0))
         | 
| 171 | 
            +
                            else:
         | 
| 172 | 
            +
                                _outputs.append(curr_outputs[i])
         | 
| 173 | 
            +
                        curr_outputs = _outputs
         | 
| 174 | 
            +
             | 
| 175 | 
            +
                    return {
         | 
| 176 | 
            +
                        "result": (curr_outputs[0],),
         | 
| 177 | 
            +
                        "expand": graph.finalize(),
         | 
| 178 | 
            +
                    }
         | 
| 179 | 
            +
             | 
| 180 | 
            +
            class ControlNetPreprocessorSelector:
         | 
| 181 | 
            +
                @classmethod
         | 
| 182 | 
            +
                def INPUT_TYPES(s):
         | 
| 183 | 
            +
                    return {
         | 
| 184 | 
            +
                        "required": {
         | 
| 185 | 
            +
                            "preprocessor": (PREPROCESSOR_OPTIONS,),
         | 
| 186 | 
            +
                        }
         | 
| 187 | 
            +
                    }
         | 
| 188 | 
            +
             | 
| 189 | 
            +
                RETURN_TYPES = (PREPROCESSOR_OPTIONS,)
         | 
| 190 | 
            +
                RETURN_NAMES = ("preprocessor",)
         | 
| 191 | 
            +
                FUNCTION = "get_preprocessor"
         | 
| 192 | 
            +
             | 
| 193 | 
            +
                CATEGORY = "ControlNet Preprocessors"
         | 
| 194 | 
            +
             | 
| 195 | 
            +
                def get_preprocessor(self, preprocessor: str):
         | 
| 196 | 
            +
                    return (preprocessor,)
         | 
| 197 | 
            +
             | 
| 198 | 
            +
             | 
| 199 | 
            +
            NODE_CLASS_MAPPINGS = {
         | 
| 200 | 
            +
                **AUX_NODE_MAPPINGS,
         | 
| 201 | 
            +
                "AIO_Preprocessor": AIO_Preprocessor,
         | 
| 202 | 
            +
                "ControlNetPreprocessorSelector": ControlNetPreprocessorSelector,
         | 
| 203 | 
            +
                **HIE_NODE_CLASS_MAPPINGS,
         | 
| 204 | 
            +
                "ExecuteAllControlNetPreprocessors": ExecuteAllControlNetPreprocessors,
         | 
| 205 | 
            +
                "ControlNetAuxSimpleAddText": ControlNetAuxSimpleAddText
         | 
| 206 | 
            +
            }
         | 
| 207 | 
            +
             | 
| 208 | 
            +
            NODE_DISPLAY_NAME_MAPPINGS = {
         | 
| 209 | 
            +
                **AUX_DISPLAY_NAME_MAPPINGS,
         | 
| 210 | 
            +
                "AIO_Preprocessor": "AIO Aux Preprocessor",
         | 
| 211 | 
            +
                "ControlNetPreprocessorSelector": "Preprocessor Selector",
         | 
| 212 | 
            +
                **HIE_NODE_DISPLAY_NAME_MAPPINGS,
         | 
| 213 | 
            +
                "ExecuteAllControlNetPreprocessors": "Execute All ControlNet Preprocessors"
         | 
| 214 | 
            +
            }
         | 
    	
        custom_nodes/comfyui_controlnet_aux/config.example.yaml
    ADDED
    
    | @@ -0,0 +1,20 @@ | |
| 1 | 
            +
# this is an example config.yaml file; you can rename it to config.yaml if you want to use it
         | 
| 2 | 
            +
            # ###############################################################################################
         | 
| 3 | 
            +
# This path is the base folder for custom preprocessor models. Default is "./ckpts"
         | 
| 4 | 
            +
            # you can also use absolute paths like: "/root/ComfyUI/custom_nodes/comfyui_controlnet_aux/ckpts" or "D:\\ComfyUI\\custom_nodes\\comfyui_controlnet_aux\\ckpts"
         | 
| 5 | 
            +
            annotator_ckpts_path: "./ckpts"
         | 
| 6 | 
            +
            # ###############################################################################################
         | 
| 7 | 
            +
            # This path is for downloading temporary files.
         | 
| 8 | 
            +
# You SHOULD use an absolute path for this, like "D:\\temp"; DO NOT use relative paths. Empty for default.
         | 
| 9 | 
            +
            custom_temp_path: 
         | 
| 10 | 
            +
            # ###############################################################################################
         | 
| 11 | 
            +
            # if you already have downloaded ckpts via huggingface hub into default cache path like: ~/.cache/huggingface/hub, you can set this True to use symlinks to save space
         | 
| 12 | 
            +
            USE_SYMLINKS: False
         | 
| 13 | 
            +
            # ###############################################################################################
         | 
| 14 | 
            +
# EP_list is a list of execution providers for onnxruntime; if one of them is not available or not working well, you can delete that provider from here (config.yaml)
         | 
| 15 | 
            +
            # you can find all available providers here: https://onnxruntime.ai/docs/execution-providers
         | 
| 16 | 
            +
            # for example, if you have CUDA installed, you can set it to: ["CUDAExecutionProvider", "CPUExecutionProvider"]
         | 
| 17 | 
            +
# an empty list, or keeping only ["CPUExecutionProvider"], means cv2.dnn.readNetFromONNX is used to load onnx models
         | 
| 18 | 
            +
# if your onnx models can only run on the CPU or have other issues, we recommend using the .pt model instead.
         | 
| 19 | 
            +
            # default value is ["CUDAExecutionProvider", "DirectMLExecutionProvider", "OpenVINOExecutionProvider", "ROCMExecutionProvider", "CPUExecutionProvider"]
         | 
| 20 | 
            +
            EP_list: ["CUDAExecutionProvider", "DirectMLExecutionProvider", "OpenVINOExecutionProvider", "ROCMExecutionProvider", "CPUExecutionProvider"]
         | 
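A minimal sketch of reading a config like this (assuming PyYAML; the key names and defaults come from the comments above, but the loader itself is illustrative, not the repo's actual one):

```python
import yaml

# Missing or empty keys fall back to the documented defaults.
with open("config.yaml") as f:
    cfg = yaml.safe_load(f) or {}

annotator_ckpts_path = cfg.get("annotator_ckpts_path") or "./ckpts"
use_symlinks = bool(cfg.get("USE_SYMLINKS", False))
ep_list = cfg.get("EP_list") or ["CPUExecutionProvider"]
print(annotator_ckpts_path, use_symlinks, ep_list)
```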
    	
        custom_nodes/comfyui_controlnet_aux/dev_interface.py
    ADDED
    
    | @@ -0,0 +1,6 @@ | |
| 1 | 
            +
            from pathlib import Path
         | 
| 2 | 
            +
            from utils import here
         | 
| 3 | 
            +
            import sys
         | 
| 4 | 
            +
            sys.path.append(str(Path(here, "src")))
         | 
| 5 | 
            +
             | 
| 6 | 
            +
            from custom_controlnet_aux import *
         | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/CNAuxBanner.jpg
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/ExecuteAll.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/ExecuteAll1.jpg
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/ExecuteAll2.jpg
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/comfyui-controlnet-aux-logo.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_animal_pose.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_anime_face_segmentor.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_anyline.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_densepose.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_depth_anything.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_depth_anything_v2.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_dsine.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_marigold.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_marigold_flat.jpg
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_mesh_graphormer.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_metric3d.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_onnx.png
    ADDED
    
    |   | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_recolor.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_save_kps.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_teed.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_torchscript.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/examples/example_unimatch.png
    ADDED
    
    |   | 
| Git LFS Details
 | 
    	
        custom_nodes/comfyui_controlnet_aux/hint_image_enchance.py
    ADDED
    
    | @@ -0,0 +1,233 @@ | |
| 1 | 
            +
            from .log import log
         | 
| 2 | 
            +
            from .utils import ResizeMode, safe_numpy
         | 
| 3 | 
            +
            import numpy as np
         | 
| 4 | 
            +
            import torch
         | 
| 5 | 
            +
            import cv2
         | 
| 6 | 
            +
            from .utils import get_unique_axis0
         | 
| 7 | 
            +
            from .lvminthin import nake_nms, lvmin_thin
         | 
| 8 | 
            +
             | 
| 9 | 
            +
            MAX_IMAGEGEN_RESOLUTION = 8192 #https://github.com/comfyanonymous/ComfyUI/blob/c910b4a01ca58b04e5d4ab4c747680b996ada02b/nodes.py#L42
         | 
| 10 | 
            +
            RESIZE_MODES = [ResizeMode.RESIZE.value, ResizeMode.INNER_FIT.value, ResizeMode.OUTER_FIT.value]
         | 
| 11 | 
            +
             | 
| 12 | 
            +
            #Port from https://github.com/Mikubill/sd-webui-controlnet/blob/e67e017731aad05796b9615dc6eadce911298ea1/internal_controlnet/external_code.py#L89
         | 
| 13 | 
            +
            class PixelPerfectResolution:
         | 
| 14 | 
            +
                @classmethod
         | 
| 15 | 
            +
                def INPUT_TYPES(s):
         | 
| 16 | 
            +
                    return {
         | 
| 17 | 
            +
                        "required": {
         | 
| 18 | 
            +
                            "original_image": ("IMAGE", ),
         | 
| 19 | 
            +
                            "image_gen_width": ("INT", {"default": 512, "min": 64, "max": MAX_IMAGEGEN_RESOLUTION, "step": 8}),
         | 
| 20 | 
            +
                            "image_gen_height": ("INT", {"default": 512, "min": 64, "max": MAX_IMAGEGEN_RESOLUTION, "step": 8}),
         | 
| 21 | 
            +
                            #https://github.com/comfyanonymous/ComfyUI/blob/c910b4a01ca58b04e5d4ab4c747680b996ada02b/nodes.py#L854
         | 
| 22 | 
            +
                            "resize_mode": (RESIZE_MODES, {"default": ResizeMode.RESIZE.value})
         | 
| 23 | 
            +
                        }
         | 
| 24 | 
            +
                    }
         | 
| 25 | 
            +
                
         | 
| 26 | 
            +
                RETURN_TYPES = ("INT",)
         | 
| 27 | 
            +
                RETURN_NAMES = ("RESOLUTION (INT)", )
         | 
| 28 | 
            +
                FUNCTION = "execute"
         | 
| 29 | 
            +
             | 
| 30 | 
            +
                CATEGORY = "ControlNet Preprocessors"
         | 
| 31 | 
            +
             | 
| 32 | 
            +
                def execute(self, original_image, image_gen_width, image_gen_height, resize_mode):
         | 
| 33 | 
            +
                    _, raw_H, raw_W, _ = original_image.shape
         | 
| 34 | 
            +
             | 
| 35 | 
            +
                    k0 = float(image_gen_height) / float(raw_H)
         | 
| 36 | 
            +
                    k1 = float(image_gen_width) / float(raw_W)
         | 
| 37 | 
            +
             | 
| 38 | 
            +
                    if resize_mode == ResizeMode.OUTER_FIT.value:
         | 
| 39 | 
            +
                        estimation = min(k0, k1) * float(min(raw_H, raw_W))
         | 
| 40 | 
            +
                    else:
         | 
| 41 | 
            +
                        estimation = max(k0, k1) * float(min(raw_H, raw_W))
         | 
| 42 | 
            +
             | 
| 43 | 
            +
                    log.debug(f"Pixel Perfect Computation:")
         | 
| 44 | 
            +
                    log.debug(f"resize_mode = {resize_mode}")
         | 
| 45 | 
            +
                    log.debug(f"raw_H = {raw_H}")
         | 
| 46 | 
            +
                    log.debug(f"raw_W = {raw_W}")
         | 
| 47 | 
            +
                    log.debug(f"target_H = {image_gen_height}")
         | 
| 48 | 
            +
                    log.debug(f"target_W = {image_gen_width}")
         | 
| 49 | 
            +
                    log.debug(f"estimation = {estimation}")
         | 
| 50 | 
            +
             | 
| 51 | 
            +
                    return (int(np.round(estimation)), )
         | 
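# Worked example (illustrative, not part of the node): for a 1024x768 (WxH)
# input and a 512x512 generation target with resize_mode = RESIZE,
# k0 = 512/768 ~= 0.667 and k1 = 512/1024 = 0.5, so
# estimation = max(k0, k1) * min(768, 1024) ~= 0.667 * 768 = 512.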
| 52 | 
            +
             | 
| 53 | 
            +
            class HintImageEnchance:
         | 
| 54 | 
            +
                @classmethod
         | 
| 55 | 
            +
                def INPUT_TYPES(s):
         | 
| 56 | 
            +
                    return {
         | 
| 57 | 
            +
                        "required": {
         | 
| 58 | 
            +
                            "hint_image": ("IMAGE", ),
         | 
| 59 | 
            +
                            "image_gen_width": ("INT", {"default": 512, "min": 64, "max": MAX_IMAGEGEN_RESOLUTION, "step": 8}),
         | 
| 60 | 
            +
                            "image_gen_height": ("INT", {"default": 512, "min": 64, "max": MAX_IMAGEGEN_RESOLUTION, "step": 8}),
         | 
| 61 | 
            +
                            #https://github.com/comfyanonymous/ComfyUI/blob/c910b4a01ca58b04e5d4ab4c747680b996ada02b/nodes.py#L854
         | 
| 62 | 
            +
                            "resize_mode": (RESIZE_MODES, {"default": ResizeMode.RESIZE.value})
         | 
| 63 | 
            +
                        }
         | 
| 64 | 
            +
                    }
         | 
| 65 | 
            +
                
         | 
| 66 | 
            +
                RETURN_TYPES = ("IMAGE",)
         | 
| 67 | 
            +
                FUNCTION = "execute"
         | 
| 68 | 
            +
             | 
| 69 | 
            +
                CATEGORY = "ControlNet Preprocessors"
         | 
| 70 | 
            +
                def execute(self, hint_image, image_gen_width, image_gen_height, resize_mode):
         | 
| 71 | 
            +
                    outs = []
         | 
| 72 | 
            +
                    for single_hint_image in hint_image:
         | 
| 73 | 
            +
                        np_hint_image = np.asarray(single_hint_image * 255., dtype=np.uint8)
         | 
| 74 | 
            +
             | 
| 75 | 
            +
                        if resize_mode == ResizeMode.RESIZE.value:
         | 
| 76 | 
            +
                            np_hint_image = self.execute_resize(np_hint_image, image_gen_width, image_gen_height)
         | 
| 77 | 
            +
                        elif resize_mode == ResizeMode.OUTER_FIT.value:
         | 
| 78 | 
            +
                            np_hint_image = self.execute_outer_fit(np_hint_image, image_gen_width, image_gen_height)
         | 
| 79 | 
            +
                        else:
         | 
| 80 | 
            +
                            np_hint_image = self.execute_inner_fit(np_hint_image, image_gen_width, image_gen_height)
         | 
| 81 | 
            +
                        
         | 
| 82 | 
            +
                        outs.append(torch.from_numpy(np_hint_image.astype(np.float32) / 255.0))
         | 
| 83 | 
            +
                    
         | 
| 84 | 
            +
                    return (torch.stack(outs, dim=0),)
         | 
| 85 | 
            +
                
         | 
| 86 | 
            +
                def execute_resize(self, detected_map, w, h):
         | 
| 87 | 
            +
                    detected_map = self.high_quality_resize(detected_map, (w, h))
         | 
| 88 | 
            +
                    detected_map = safe_numpy(detected_map)
         | 
| 89 | 
            +
                    return detected_map
         | 
| 90 | 
            +
                
         | 
| 91 | 
            +
                def execute_outer_fit(self, detected_map, w, h):
         | 
| 92 | 
            +
                    old_h, old_w, _ = detected_map.shape
         | 
| 93 | 
            +
                    old_w = float(old_w)
         | 
| 94 | 
            +
                    old_h = float(old_h)
         | 
| 95 | 
            +
                    k0 = float(h) / old_h
         | 
| 96 | 
            +
                    k1 = float(w) / old_w
         | 
| 97 | 
            +
                    safeint = lambda x: int(np.round(x))
         | 
| 98 | 
            +
                    k = min(k0, k1)
         | 
| 99 | 
            +
                    
         | 
| 100 | 
            +
                    borders = np.concatenate([detected_map[0, :, :], detected_map[-1, :, :], detected_map[:, 0, :], detected_map[:, -1, :]], axis=0)
         | 
| 101 | 
            +
                    high_quality_border_color = np.median(borders, axis=0).astype(detected_map.dtype)
         | 
| 102 | 
            +
                    if len(high_quality_border_color) == 4:
         | 
| 103 | 
            +
                        # Inpaint hijack
         | 
| 104 | 
            +
                        high_quality_border_color[3] = 255
         | 
| 105 | 
            +
                    high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1])
         | 
| 106 | 
            +
                    detected_map = self.high_quality_resize(detected_map, (safeint(old_w * k), safeint(old_h * k)))
         | 
| 107 | 
            +
                    new_h, new_w, _ = detected_map.shape
         | 
| 108 | 
            +
                    pad_h = max(0, (h - new_h) // 2)
         | 
| 109 | 
            +
                    pad_w = max(0, (w - new_w) // 2)
         | 
| 110 | 
            +
                    high_quality_background[pad_h:pad_h + new_h, pad_w:pad_w + new_w] = detected_map
         | 
| 111 | 
            +
                    detected_map = high_quality_background
         | 
| 112 | 
            +
                    detected_map = safe_numpy(detected_map)
        return detected_map

    def execute_inner_fit(self, detected_map, w, h):
        old_h, old_w, _ = detected_map.shape
        old_w = float(old_w)
        old_h = float(old_h)
        k0 = float(h) / old_h
        k1 = float(w) / old_w
        safeint = lambda x: int(np.round(x))
        k = max(k0, k1)

        detected_map = self.high_quality_resize(detected_map, (safeint(old_w * k), safeint(old_h * k)))
        new_h, new_w, _ = detected_map.shape
        pad_h = max(0, (new_h - h) // 2)
        pad_w = max(0, (new_w - w) // 2)
        detected_map = detected_map[pad_h:pad_h+h, pad_w:pad_w+w]
        detected_map = safe_numpy(detected_map)
        return detected_map

    def high_quality_resize(self, x, size):
        # Written by lvmin
        # Super high-quality control map up-scaling, considering binary, seg, and one-pixel edges

        inpaint_mask = None
        if x.ndim == 3 and x.shape[2] == 4:
            inpaint_mask = x[:, :, 3]
            x = x[:, :, 0:3]

        if x.shape[0] != size[1] or x.shape[1] != size[0]:
            new_size_is_smaller = (size[0] * size[1]) < (x.shape[0] * x.shape[1])
            new_size_is_bigger = (size[0] * size[1]) > (x.shape[0] * x.shape[1])
            unique_color_count = len(get_unique_axis0(x.reshape(-1, x.shape[2])))
            is_one_pixel_edge = False
            is_binary = False
            if unique_color_count == 2:
                is_binary = np.min(x) < 16 and np.max(x) > 240
                if is_binary:
                    xc = x
                    xc = cv2.erode(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
                    xc = cv2.dilate(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
                    one_pixel_edge_count = np.where(xc < x)[0].shape[0]
                    all_edge_count = np.where(x > 127)[0].shape[0]
                    is_one_pixel_edge = one_pixel_edge_count * 2 > all_edge_count

            if 2 < unique_color_count < 200:
                interpolation = cv2.INTER_NEAREST
            elif new_size_is_smaller:
                interpolation = cv2.INTER_AREA
            else:
                interpolation = cv2.INTER_CUBIC  # Must be CUBIC because we now use nms. NEVER CHANGE THIS

            y = cv2.resize(x, size, interpolation=interpolation)
            if inpaint_mask is not None:
                inpaint_mask = cv2.resize(inpaint_mask, size, interpolation=interpolation)

            if is_binary:
                y = np.mean(y.astype(np.float32), axis=2).clip(0, 255).astype(np.uint8)
                if is_one_pixel_edge:
                    y = nake_nms(y)
                    _, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                    y = lvmin_thin(y, prunings=new_size_is_bigger)
                else:
                    _, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                y = np.stack([y] * 3, axis=2)
        else:
            y = x

        if inpaint_mask is not None:
            inpaint_mask = (inpaint_mask > 127).astype(np.float32) * 255.0
            inpaint_mask = inpaint_mask[:, :, None].clip(0, 255).astype(np.uint8)
            y = np.concatenate([y, inpaint_mask], axis=2)

        return y


class ImageGenResolutionFromLatent:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": { "latent": ("LATENT", ) }
        }

    RETURN_TYPES = ("INT", "INT")
    RETURN_NAMES = ("IMAGE_GEN_WIDTH (INT)", "IMAGE_GEN_HEIGHT (INT)")
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors"

    def execute(self, latent):
        _, _, H, W = latent["samples"].shape
        return (W * 8, H * 8)

class ImageGenResolutionFromImage:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": { "image": ("IMAGE", ) }
        }

    RETURN_TYPES = ("INT", "INT")
    RETURN_NAMES = ("IMAGE_GEN_WIDTH (INT)", "IMAGE_GEN_HEIGHT (INT)")
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors"

    def execute(self, image):
        _, H, W, _ = image.shape
        return (W, H)

NODE_CLASS_MAPPINGS = {
    "PixelPerfectResolution": PixelPerfectResolution,
    "ImageGenResolutionFromImage": ImageGenResolutionFromImage,
    "ImageGenResolutionFromLatent": ImageGenResolutionFromLatent,
    "HintImageEnchance": HintImageEnchance
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "PixelPerfectResolution": "Pixel Perfect Resolution",
    "ImageGenResolutionFromImage": "Generation Resolution From Image",
    "ImageGenResolutionFromLatent": "Generation Resolution From Latent",
    "HintImageEnchance": "Enchance And Resize Hint Images"
}
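
For a concrete feel for the inner-fit arithmetic above, here is a small standalone check with made-up sizes (a sketch only, not part of hint_image_enchance.py):

# Worked numeric check of execute_inner_fit's scale-then-center-crop logic.
import numpy as np

old_w, old_h = 768.0, 512.0           # detected map size
w, h = 1024.0, 1024.0                 # generation size
k = max(h / old_h, w / old_w)         # 2.0: scale so BOTH sides cover the target

new_w, new_h = int(np.round(old_w * k)), int(np.round(old_h * k))  # 1536 x 1024
pad_w = max(0, (new_w - int(w)) // 2)  # 256: overhang cropped equally left/right
pad_h = max(0, (new_h - int(h)) // 2)  # 0
print((new_w, new_h), (pad_w, pad_h))  # the crop window is exactly w x h
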
    	
        custom_nodes/comfyui_controlnet_aux/install.bat
    ADDED
    
@echo off

set "requirements_txt=%~dp0\requirements.txt"
set "python_exec=..\..\..\python_embeded\python.exe"

echo Installing ComfyUI's ControlNet Auxiliary Preprocessors..

if exist "%python_exec%" (
    echo Installing with ComfyUI Portable
    for /f "delims=" %%i in (%requirements_txt%) do (
        %python_exec% -s -m pip install "%%i"
    )
) else (
    echo Installing with system Python
    for /f "delims=" %%i in (%requirements_txt%) do (
        pip install "%%i"
    )
)

pause
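
For readers not on Windows, a hypothetical Python rendering of the same per-line pip loop (install.bat is the only installer the repository ships; the path handling here is an assumption):

# Hypothetical cross-platform equivalent of install.bat's per-line pip loop.
import subprocess
import sys
from pathlib import Path

requirements = Path(__file__).parent / "requirements.txt"
for line in requirements.read_text().splitlines():
    if line.strip():
        subprocess.check_call([sys.executable, "-m", "pip", "install", line.strip()])
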
    	
        custom_nodes/comfyui_controlnet_aux/log.py
    ADDED
    
# Credit: https://github.com/melMass/comfy_mtb/blob/main/log.py
import logging
import re
import os

base_log_level = logging.INFO


# Custom object that discards the output
class NullWriter:
    def write(self, text):
        pass


class Formatter(logging.Formatter):
    grey = "\x1b[38;20m"
    cyan = "\x1b[36;20m"
    purple = "\x1b[35;20m"
    yellow = "\x1b[33;20m"
    red = "\x1b[31;20m"
    bold_red = "\x1b[31;1m"
    reset = "\x1b[0m"
    # format = "%(asctime)s - [%(name)s] - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
    format = "[%(name)s] | %(levelname)s -> %(message)s"

    FORMATS = {
        logging.DEBUG: purple + format + reset,
        logging.INFO: cyan + format + reset,
        logging.WARNING: yellow + format + reset,
        logging.ERROR: red + format + reset,
        logging.CRITICAL: bold_red + format + reset,
    }

    def format(self, record):
        log_fmt = self.FORMATS.get(record.levelno)
        formatter = logging.Formatter(log_fmt)
        return formatter.format(record)


def mklog(name, level=base_log_level):
    logger = logging.getLogger(name)
    logger.setLevel(level)

    for handler in logger.handlers:
        logger.removeHandler(handler)

    ch = logging.StreamHandler()
    ch.setLevel(level)
    ch.setFormatter(Formatter())
    logger.addHandler(ch)

    # Disable log propagation
    logger.propagate = False

    return logger


# - The main app logger
log = mklog(__package__, base_log_level)


def log_user(arg):
    # f-string prefix added; the original plain string printed the literal "{arg}"
    print(f"\033[34mComfyUI ControlNet AUX:\033[0m {arg}")


def get_summary(docstring):
    return docstring.strip().split("\n\n", 1)[0]


def blue_text(text):
    return f"\033[94m{text}\033[0m"


def cyan_text(text):
    return f"\033[96m{text}\033[0m"


def get_label(label):
    words = re.findall(r"(?:^|[A-Z])[a-z]*", label)
    return " ".join(words).strip()
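
A minimal usage sketch for these helpers (hypothetical; it assumes log.py is importable as log):

# Hypothetical usage of log.py's logger factory and color helpers.
from log import mklog, blue_text, cyan_text

logger = mklog("comfyui_controlnet_aux")
logger.info(blue_text("model loaded"))   # cyan INFO line with a blue payload
logger.warning("falling back to CPU")    # yellow WARNING line
print(cyan_text("done"))                 # standalone cyan text
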
    	
        custom_nodes/comfyui_controlnet_aux/lvminthin.py
    ADDED
    
# High Quality Edge Thinning using Pure Python
# Written by Lvmin Zhang
# 2023 April
# Stanford University
# If you use this, please Cite "High Quality Edge Thinning using Pure Python", Lvmin Zhang, In Mikubill/sd-webui-controlnet.


import cv2
import numpy as np


lvmin_kernels_raw = [
    np.array([
        [-1, -1, -1],
        [0, 1, 0],
        [1, 1, 1]
    ], dtype=np.int32),
    np.array([
        [0, -1, -1],
        [1, 1, -1],
        [0, 1, 0]
    ], dtype=np.int32)
]

lvmin_kernels = []
lvmin_kernels += [np.rot90(x, k=0, axes=(0, 1)) for x in lvmin_kernels_raw]
lvmin_kernels += [np.rot90(x, k=1, axes=(0, 1)) for x in lvmin_kernels_raw]
lvmin_kernels += [np.rot90(x, k=2, axes=(0, 1)) for x in lvmin_kernels_raw]
lvmin_kernels += [np.rot90(x, k=3, axes=(0, 1)) for x in lvmin_kernels_raw]

lvmin_prunings_raw = [
    np.array([
        [-1, -1, -1],
        [-1, 1, -1],
        [0, 0, -1]
    ], dtype=np.int32),
    np.array([
        [-1, -1, -1],
        [-1, 1, -1],
        [-1, 0, 0]
    ], dtype=np.int32)
]

lvmin_prunings = []
lvmin_prunings += [np.rot90(x, k=0, axes=(0, 1)) for x in lvmin_prunings_raw]
lvmin_prunings += [np.rot90(x, k=1, axes=(0, 1)) for x in lvmin_prunings_raw]
lvmin_prunings += [np.rot90(x, k=2, axes=(0, 1)) for x in lvmin_prunings_raw]
lvmin_prunings += [np.rot90(x, k=3, axes=(0, 1)) for x in lvmin_prunings_raw]


def remove_pattern(x, kernel):
    objects = cv2.morphologyEx(x, cv2.MORPH_HITMISS, kernel)
    objects = np.where(objects > 127)
    x[objects] = 0
    return x, objects[0].shape[0] > 0


def thin_one_time(x, kernels):
    y = x
    is_done = True
    for k in kernels:
        y, has_update = remove_pattern(y, k)
        if has_update:
            is_done = False
    return y, is_done


def lvmin_thin(x, prunings=True):
    y = x
    for i in range(32):
        y, is_done = thin_one_time(y, lvmin_kernels)
        if is_done:
            break
    if prunings:
        y, _ = thin_one_time(y, lvmin_prunings)
    return y


def nake_nms(x):
    f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
    f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
    f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
    f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
    y = np.zeros_like(x)
    for f in [f1, f2, f3, f4]:
        np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
    return y
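
A quick standalone illustration of the thinning pass (a sketch; it assumes lvminthin.py is on the import path, and the bar and sizes are made up):

# Thin a 3-pixel-thick bar down toward a 1-pixel skeleton.
import numpy as np
from lvminthin import lvmin_thin

edge = np.zeros((32, 32), dtype=np.uint8)
edge[14:17, 4:28] = 255                         # 3-pixel-thick bar, values 0/255

thin = lvmin_thin(edge.copy(), prunings=True)   # copy: remove_pattern mutates its input
print(int(edge.sum()) // 255, "->", int(thin.sum()) // 255)  # edge pixel count drops
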
    	
        custom_nodes/comfyui_controlnet_aux/node_wrappers/anime_face_segment.py
    ADDED
    
from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT
import comfy.model_management as model_management
import torch
from einops import rearrange

class AnimeFace_SemSegPreprocessor:
    @classmethod
    def INPUT_TYPES(s):
        # This preprocessor is only trained on 512x resolution
        # https://github.com/siyeong0/Anime-Face-Segmentation/blob/main/predict.py#L25
        return define_preprocessor_inputs(
            remove_background_using_abg=INPUT.BOOLEAN(True),
            resolution=INPUT.RESOLUTION(default=512, min=512, max=512)
        )

    RETURN_TYPES = ("IMAGE", "MASK")
    RETURN_NAMES = ("IMAGE", "ABG_CHARACTER_MASK (MASK)")
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Semantic Segmentation"

    def execute(self, image, remove_background_using_abg=True, resolution=512, **kwargs):
        from custom_controlnet_aux.anime_face_segment import AnimeFaceSegmentor

        model = AnimeFaceSegmentor.from_pretrained().to(model_management.get_torch_device())
        if remove_background_using_abg:
            out_image_with_mask = common_annotator_call(model, image, resolution=resolution, remove_background=True)
            out_image = out_image_with_mask[..., :3]
            mask = out_image_with_mask[..., 3:]
            mask = rearrange(mask, "n h w c -> n c h w")
        else:
            out_image = common_annotator_call(model, image, resolution=resolution, remove_background=False)
            N, H, W, C = out_image.shape
            mask = torch.ones(N, C, H, W)
        del model
        return (out_image, mask)

NODE_CLASS_MAPPINGS = {
    "AnimeFace_SemSegPreprocessor": AnimeFace_SemSegPreprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "AnimeFace_SemSegPreprocessor": "Anime Face Segmentor"
}
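
A synthetic shape check for the RGBA split in the ABG branch above (not part of the node; the sizes are made up):

# How the RGBA tensor is split into the IMAGE and MASK outputs.
import torch
from einops import rearrange

out_image_with_mask = torch.rand(1, 512, 512, 4)  # N H W C (RGBA) from the segmentor
out_image = out_image_with_mask[..., :3]          # (1, 512, 512, 3): IMAGE stays N H W C
mask = out_image_with_mask[..., 3:]               # (1, 512, 512, 1): alpha channel
mask = rearrange(mask, "n h w c -> n c h w")      # (1, 1, 512, 512) for the MASK output
print(out_image.shape, mask.shape)
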
    	
        custom_nodes/comfyui_controlnet_aux/node_wrappers/anyline.py
    ADDED
    
import torch
import numpy as np
import comfy.model_management as model_management
import comfy.utils

# Requires comfyui_controlnet_aux functions and classes
from ..utils import common_annotator_call, INPUT, define_preprocessor_inputs

def get_intensity_mask(image_array, lower_bound, upper_bound):
    mask = image_array[:, :, 0]
    mask = np.where((mask >= lower_bound) & (mask <= upper_bound), mask, 0)
    mask = np.expand_dims(mask, 2).repeat(3, axis=2)
    return mask

def combine_layers(base_layer, top_layer):
    mask = top_layer.astype(bool)
    temp = 1 - (1 - top_layer) * (1 - base_layer)
    result = base_layer * (~mask) + temp * mask
    return result

class AnyLinePreprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            merge_with_lineart=INPUT.COMBO(["lineart_standard", "lineart_realisitic", "lineart_anime", "manga_line"], default="lineart_standard"),
            resolution=INPUT.RESOLUTION(default=1280, step=8),
            lineart_lower_bound=INPUT.FLOAT(default=0),
            lineart_upper_bound=INPUT.FLOAT(default=1),
            object_min_size=INPUT.INT(default=36, min=1),
            object_connectivity=INPUT.INT(default=1, min=1)
        )

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)

    FUNCTION = "get_anyline"
    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    def __init__(self):
        self.device = model_management.get_torch_device()

    def get_anyline(self, image, merge_with_lineart="lineart_standard", resolution=512, lineart_lower_bound=0, lineart_upper_bound=1, object_min_size=36, object_connectivity=1):
        from custom_controlnet_aux.teed import TEDDetector
        from skimage import morphology
        pbar = comfy.utils.ProgressBar(3)

        # Process the image with MTEED model
        mteed_model = TEDDetector.from_pretrained("TheMistoAI/MistoLine", "MTEED.pth", subfolder="Anyline").to(self.device)
        mteed_result = common_annotator_call(mteed_model, image, resolution=resolution, show_pbar=False)
        mteed_result = mteed_result.numpy()
        del mteed_model
        pbar.update(1)

        # Process the image with the lineart standard preprocessor
        if merge_with_lineart == "lineart_standard":
            from custom_controlnet_aux.lineart_standard import LineartStandardDetector
            lineart_standard_detector = LineartStandardDetector()
            lineart_result = common_annotator_call(lineart_standard_detector, image, guassian_sigma=2, intensity_threshold=3, resolution=resolution, show_pbar=False).numpy()
            del lineart_standard_detector
        else:
            from custom_controlnet_aux.lineart import LineartDetector
            from custom_controlnet_aux.lineart_anime import LineartAnimeDetector
            from custom_controlnet_aux.manga_line import LineartMangaDetector
            lineart_detector = dict(lineart_realisitic=LineartDetector, lineart_anime=LineartAnimeDetector, manga_line=LineartMangaDetector)[merge_with_lineart]
            lineart_detector = lineart_detector.from_pretrained().to(self.device)
            lineart_result = common_annotator_call(lineart_detector, image, resolution=resolution, show_pbar=False).numpy()
            del lineart_detector
        pbar.update(1)

        final_result = []
        for i in range(len(image)):
            _lineart_result = get_intensity_mask(lineart_result[i], lower_bound=lineart_lower_bound, upper_bound=lineart_upper_bound)
            _cleaned = morphology.remove_small_objects(_lineart_result.astype(bool), min_size=object_min_size, connectivity=object_connectivity)
            _lineart_result = _lineart_result * _cleaned
            _mteed_result = mteed_result[i]

            # Combine the results
            final_result.append(torch.from_numpy(combine_layers(_mteed_result, _lineart_result)))
        pbar.update(1)
        return (torch.stack(final_result),)

NODE_CLASS_MAPPINGS = {
    "AnyLineArtPreprocessor_aux": AnyLinePreprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "AnyLineArtPreprocessor_aux": "AnyLine Lineart"
}
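
get_intensity_mask and combine_layers at the top of this file implement a masked screen blend; here is a tiny numeric check on synthetic layers in [0, 1] (a sketch, not part of the node):

# Numeric check of combine_layers' masked screen blend.
import numpy as np

base = np.full((4, 4, 3), 0.5, dtype=np.float32)   # stand-in for the MTEED result
top = np.zeros((4, 4, 3), dtype=np.float32)
top[1, :] = 0.8                                    # one bright lineart row

mask = top.astype(bool)
screen = 1 - (1 - top) * (1 - base)                # screen blend: brightens overlaps
result = base * (~mask) + screen * mask
print(result[1, 0, 0], result[0, 0, 0])            # ~0.9 where lineart exists, 0.5 elsewhere
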
    	
        custom_nodes/comfyui_controlnet_aux/node_wrappers/binary.py
    ADDED
    
from ..utils import common_annotator_call, INPUT, define_preprocessor_inputs
import comfy.model_management as model_management

class Binary_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            bin_threshold=INPUT.INT(default=100, max=255),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    def execute(self, image, bin_threshold=100, resolution=512, **kwargs):
        from custom_controlnet_aux.binary import BinaryDetector

        return (common_annotator_call(BinaryDetector(), image, bin_threshold=bin_threshold, resolution=resolution), )


NODE_CLASS_MAPPINGS = {
    "BinaryPreprocessor": Binary_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "BinaryPreprocessor": "Binary Lines"
}
    	
        custom_nodes/comfyui_controlnet_aux/node_wrappers/canny.py
    ADDED
    
from ..utils import common_annotator_call, INPUT, define_preprocessor_inputs
import comfy.model_management as model_management

class Canny_Edge_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            low_threshold=INPUT.INT(default=100, max=255),
            high_threshold=INPUT.INT(default=200, max=255),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    def execute(self, image, low_threshold=100, high_threshold=200, resolution=512, **kwargs):
        from custom_controlnet_aux.canny import CannyDetector

        return (common_annotator_call(CannyDetector(), image, low_threshold=low_threshold, high_threshold=high_threshold, resolution=resolution), )


NODE_CLASS_MAPPINGS = {
    "CannyEdgePreprocessor": Canny_Edge_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "CannyEdgePreprocessor": "Canny Edge"
}
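
For intuition on the two thresholds this node exposes, a standalone check that calls cv2.Canny directly on a synthetic image (the node itself routes through CannyDetector):

# Low/high hysteresis thresholds, matching the node's defaults of 100/200.
import cv2
import numpy as np

img = np.zeros((64, 64), dtype=np.uint8)
cv2.circle(img, (32, 32), 20, 255, -1)   # filled white disc on black

edges = cv2.Canny(img, 100, 200)         # gradients above 200 seed edges; 100-200 extend them
print(int((edges > 0).sum()))            # number of detected edge pixels
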
    	
        custom_nodes/comfyui_controlnet_aux/node_wrappers/color.py
    ADDED
    
from ..utils import common_annotator_call, INPUT, define_preprocessor_inputs
import comfy.model_management as model_management

class Color_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(resolution=INPUT.RESOLUTION())

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/T2IAdapter-only"

    def execute(self, image, resolution=512, **kwargs):
        from custom_controlnet_aux.color import ColorDetector

        return (common_annotator_call(ColorDetector(), image, resolution=resolution), )


NODE_CLASS_MAPPINGS = {
    "ColorPreprocessor": Color_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "ColorPreprocessor": "Color Pallete"
}
    	
        custom_nodes/comfyui_controlnet_aux/node_wrappers/densepose.py
    ADDED
    
from ..utils import common_annotator_call, INPUT, define_preprocessor_inputs
import comfy.model_management as model_management

class DensePose_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            model=INPUT.COMBO(["densepose_r50_fpn_dl.torchscript", "densepose_r101_fpn_dl.torchscript"]),
            cmap=INPUT.COMBO(["Viridis (MagicAnimate)", "Parula (CivitAI)"]),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Faces and Poses Estimators"

    def execute(self, image, model="densepose_r50_fpn_dl.torchscript", cmap="Viridis (MagicAnimate)", resolution=512):
        from custom_controlnet_aux.densepose import DenseposeDetector
        model = DenseposeDetector \
                    .from_pretrained(filename=model) \
                    .to(model_management.get_torch_device())
        return (common_annotator_call(model, image, cmap="viridis" if "Viridis" in cmap else "parula", resolution=resolution), )


NODE_CLASS_MAPPINGS = {
    "DensePosePreprocessor": DensePose_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "DensePosePreprocessor": "DensePose Estimator"
}
    	
        custom_nodes/comfyui_controlnet_aux/node_wrappers/depth_anything.py
    ADDED
    
from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT
import comfy.model_management as model_management

class Depth_Anything_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            ckpt_name=INPUT.COMBO(
                ["depth_anything_vitl14.pth", "depth_anything_vitb14.pth", "depth_anything_vits14.pth"]
            ),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    def execute(self, image, ckpt_name="depth_anything_vitl14.pth", resolution=512, **kwargs):
        from custom_controlnet_aux.depth_anything import DepthAnythingDetector

        model = DepthAnythingDetector.from_pretrained(filename=ckpt_name).to(model_management.get_torch_device())
        out = common_annotator_call(model, image, resolution=resolution)
        del model
        return (out, )

class Zoe_Depth_Anything_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            environment=INPUT.COMBO(["indoor", "outdoor"]),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    def execute(self, image, environment="indoor", resolution=512, **kwargs):
        from custom_controlnet_aux.zoe import ZoeDepthAnythingDetector
        ckpt_name = "depth_anything_metric_depth_indoor.pt" if environment == "indoor" else "depth_anything_metric_depth_outdoor.pt"
        model = ZoeDepthAnythingDetector.from_pretrained(filename=ckpt_name).to(model_management.get_torch_device())
        out = common_annotator_call(model, image, resolution=resolution)
        del model
        return (out, )

NODE_CLASS_MAPPINGS = {
    "DepthAnythingPreprocessor": Depth_Anything_Preprocessor,
    "Zoe_DepthAnythingPreprocessor": Zoe_Depth_Anything_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "DepthAnythingPreprocessor": "Depth Anything",
    "Zoe_DepthAnythingPreprocessor": "Zoe Depth Anything"
}
    	
        custom_nodes/comfyui_controlnet_aux/node_wrappers/depth_anything_v2.py
    ADDED
    
from ..utils import common_annotator_call, INPUT, define_preprocessor_inputs
import comfy.model_management as model_management

class Depth_Anything_V2_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            ckpt_name=INPUT.COMBO(
                ["depth_anything_v2_vitg.pth", "depth_anything_v2_vitl.pth", "depth_anything_v2_vitb.pth", "depth_anything_v2_vits.pth"],
                default="depth_anything_v2_vitl.pth"
            ),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    def execute(self, image, ckpt_name="depth_anything_v2_vitl.pth", resolution=512, **kwargs):
        from custom_controlnet_aux.depth_anything_v2 import DepthAnythingV2Detector

        model = DepthAnythingV2Detector.from_pretrained(filename=ckpt_name).to(model_management.get_torch_device())
        out = common_annotator_call(model, image, resolution=resolution, max_depth=1)
        del model
        return (out, )

""" class Depth_Anything_Metric_V2_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return create_node_input_types(
            environment=(["indoor", "outdoor"], {"default": "indoor"}),
            max_depth=("FLOAT", {"min": 0, "max": 100, "default": 20.0, "step": 0.01})
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    def execute(self, image, environment, resolution=512, max_depth=20.0, **kwargs):
        from custom_controlnet_aux.depth_anything_v2 import DepthAnythingV2Detector
        filename = dict(indoor="depth_anything_v2_metric_hypersim_vitl.pth", outdoor="depth_anything_v2_metric_vkitti_vitl.pth")[environment]
        model = DepthAnythingV2Detector.from_pretrained(filename=filename).to(model_management.get_torch_device())
        out = common_annotator_call(model, image, resolution=resolution, max_depth=max_depth)
        del model
        return (out, ) """

NODE_CLASS_MAPPINGS = {
    "DepthAnythingV2Preprocessor": Depth_Anything_V2_Preprocessor,
    #"Metric_DepthAnythingV2Preprocessor": Depth_Anything_Metric_V2_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "DepthAnythingV2Preprocessor": "Depth Anything V2 - Relative",
    #"Metric_DepthAnythingV2Preprocessor": "Depth Anything V2 - Metric"
}
    	
        custom_nodes/comfyui_controlnet_aux/node_wrappers/diffusion_edge.py
    ADDED
    
| 1 | 
            +
            from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT, run_script
         | 
| 2 | 
            +
            import comfy.model_management as model_management
         | 
| 3 | 
            +
            import sys
         | 
| 4 | 
            +
             | 
| 5 | 
            +
            def install_deps():
         | 
| 6 | 
            +
                try:
         | 
| 7 | 
            +
                    import sklearn
         | 
| 8 | 
            +
                except:
         | 
| 9 | 
            +
                    run_script([sys.executable, '-s', '-m', 'pip', 'install', 'scikit-learn'])
         | 
| 10 | 
            +
             | 
| 11 | 
            +
            class DiffusionEdge_Preprocessor:
         | 
| 12 | 
            +
                @classmethod
         | 
| 13 | 
            +
                def INPUT_TYPES(s):
         | 
| 14 | 
            +
                    return define_preprocessor_inputs(
         | 
| 15 | 
            +
                        environment=INPUT.COMBO(["indoor", "urban", "natrual"]),
         | 
| 16 | 
            +
                        patch_batch_size=INPUT.INT(default=4, min=1, max=16),
         | 
| 17 | 
            +
                        resolution=INPUT.RESOLUTION()
         | 
| 18 | 
            +
                    )
         | 
| 19 | 
            +
             | 
| 20 | 
            +
                RETURN_TYPES = ("IMAGE",)
         | 
| 21 | 
            +
                FUNCTION = "execute"
         | 
| 22 | 
            +
             | 
| 23 | 
            +
                CATEGORY = "ControlNet Preprocessors/Line Extractors"
         | 
| 24 | 
            +
             | 
| 25 | 
            +
                def execute(self, image, environment="indoor", patch_batch_size=4, resolution=512, **kwargs):
         | 
| 26 | 
            +
                    install_deps()
         | 
| 27 | 
            +
                    from custom_controlnet_aux.diffusion_edge import DiffusionEdgeDetector
         | 
| 28 | 
            +
             | 
| 29 | 
            +
                    model = DiffusionEdgeDetector \
         | 
| 30 | 
            +
                        .from_pretrained(filename = f"diffusion_edge_{environment}.pt") \
         | 
| 31 | 
            +
                        .to(model_management.get_torch_device())
         | 
| 32 | 
            +
                    out = common_annotator_call(model, image, resolution=resolution, patch_batch_size=patch_batch_size)
         | 
| 33 | 
            +
                    del model
         | 
| 34 | 
            +
                    return (out, )
         | 
| 35 | 
            +
             | 
| 36 | 
            +
            NODE_CLASS_MAPPINGS = {
         | 
| 37 | 
            +
                "DiffusionEdge_Preprocessor": DiffusionEdge_Preprocessor,
         | 
| 38 | 
            +
            }
         | 
| 39 | 
            +
            NODE_DISPLAY_NAME_MAPPINGS = {
         | 
| 40 | 
            +
                "DiffusionEdge_Preprocessor": "Diffusion Edge (batch size ↑ => speed ↑, VRAM ↑)",
         | 
| 41 | 
            +
            }
         | 
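
install_deps() above shows the repo's lazy-dependency pattern: import on demand, pip-install only on failure. A generalized sketch of the same idea (ensure_package is a hypothetical helper name; run_script and the '-s' isolation flag are reused from the file above):

    import sys

    def ensure_package(import_name, pip_name=None):
        # Try the import first; shell out to pip only when the module is missing.
        try:
            __import__(import_name)
        except ImportError:
            run_script([sys.executable, '-s', '-m', 'pip', 'install', pip_name or import_name])

    # e.g. ensure_package("sklearn", "scikit-learn") reproduces install_deps().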
    	
custom_nodes/comfyui_controlnet_aux/node_wrappers/dsine.py
ADDED
@@ -0,0 +1,31 @@
from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT
import comfy.model_management as model_management

class DSINE_Normal_Map_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            fov=INPUT.FLOAT(max=365.0, default=60.0),
            iterations=INPUT.INT(min=1, max=20, default=5),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    def execute(self, image, fov=60.0, iterations=5, resolution=512, **kwargs):
        from custom_controlnet_aux.dsine import DsineDetector

        model = DsineDetector.from_pretrained().to(model_management.get_torch_device())
        out = common_annotator_call(model, image, fov=fov, iterations=iterations, resolution=resolution)
        del model
        return (out,)

NODE_CLASS_MAPPINGS = {
    "DSINE-NormalMapPreprocessor": DSINE_Normal_Map_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "DSINE-NormalMapPreprocessor": "DSINE Normal Map"
}
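
define_preprocessor_inputs comes from ../utils and is not shown here; presumably it expands its keyword arguments into ComfyUI's standard INPUT_TYPES dict. A hedged sketch of what the node above would then declare (the resolution bounds and the exact option keys are illustrative guesses, not taken from the helper):

    {
        "required": {"image": ("IMAGE",)},
        "optional": {
            "fov": ("FLOAT", {"default": 60.0, "max": 365.0}),
            "iterations": ("INT", {"default": 5, "min": 1, "max": 20}),
            "resolution": ("INT", {"default": 512, "min": 64, "max": 2048}),
        },
    }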
    	
custom_nodes/comfyui_controlnet_aux/node_wrappers/dwpose.py
ADDED
@@ -0,0 +1,162 @@
from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT
import comfy.model_management as model_management
import numpy as np
import warnings
from custom_controlnet_aux.dwpose import DwposeDetector, AnimalposeDetector
import os
import json

DWPOSE_MODEL_NAME = "yzd-v/DWPose"
# Trigger startup caching for onnxruntime
GPU_PROVIDERS = ["CUDAExecutionProvider", "DirectMLExecutionProvider", "OpenVINOExecutionProvider", "ROCMExecutionProvider", "CoreMLExecutionProvider"]
def check_ort_gpu():
    try:
        import onnxruntime as ort
        for provider in GPU_PROVIDERS:
            if provider in ort.get_available_providers():
                return True
        return False
    except Exception:
        return False

if not os.environ.get("DWPOSE_ONNXRT_CHECKED"):
    if check_ort_gpu():
        print("DWPose: Onnxruntime with acceleration providers detected")
    else:
        warnings.warn("DWPose: Onnxruntime not found or installed without acceleration providers; falling back to OpenCV on CPU. DWPose may run very slowly")
        os.environ['AUX_ORT_PROVIDERS'] = ''
    os.environ["DWPOSE_ONNXRT_CHECKED"] = '1'

class DWPose_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            detect_hand=INPUT.COMBO(["enable", "disable"]),
            detect_body=INPUT.COMBO(["enable", "disable"]),
            detect_face=INPUT.COMBO(["enable", "disable"]),
            resolution=INPUT.RESOLUTION(),
            bbox_detector=INPUT.COMBO(
                ["yolox_l.torchscript.pt", "yolox_l.onnx", "yolo_nas_l_fp16.onnx", "yolo_nas_m_fp16.onnx", "yolo_nas_s_fp16.onnx"],
                default="yolox_l.onnx"
            ),
            pose_estimator=INPUT.COMBO(
                ["dw-ll_ucoco_384_bs5.torchscript.pt", "dw-ll_ucoco_384.onnx", "dw-ll_ucoco.onnx"],
                default="dw-ll_ucoco_384_bs5.torchscript.pt"
            ),
            scale_stick_for_xinsr_cn=INPUT.COMBO(["disable", "enable"])
        )

    RETURN_TYPES = ("IMAGE", "POSE_KEYPOINT")
    FUNCTION = "estimate_pose"

    CATEGORY = "ControlNet Preprocessors/Faces and Poses Estimators"

    def estimate_pose(self, image, detect_hand="enable", detect_body="enable", detect_face="enable", resolution=512, bbox_detector="yolox_l.onnx", pose_estimator="dw-ll_ucoco_384.onnx", scale_stick_for_xinsr_cn="disable", **kwargs):
        # Pick the Hugging Face repo that hosts the selected detector weights.
        if bbox_detector == "yolox_l.onnx":
            yolo_repo = DWPOSE_MODEL_NAME
        elif "yolox" in bbox_detector:
            yolo_repo = "hr16/yolox-onnx"
        elif "yolo_nas" in bbox_detector:
            yolo_repo = "hr16/yolo-nas-fp16"
        else:
            raise NotImplementedError(f"Download mechanism for {bbox_detector}")

        if pose_estimator == "dw-ll_ucoco_384.onnx":
            pose_repo = DWPOSE_MODEL_NAME
        elif pose_estimator.endswith(".onnx"):
            pose_repo = "hr16/UnJIT-DWPose"
        elif pose_estimator.endswith(".torchscript.pt"):
            pose_repo = "hr16/DWPose-TorchScript-BatchSize5"
        else:
            raise NotImplementedError(f"Download mechanism for {pose_estimator}")

        model = DwposeDetector.from_pretrained(
            pose_repo,
            yolo_repo,
            det_filename=bbox_detector, pose_filename=pose_estimator,
            torchscript_device=model_management.get_torch_device()
        )
        detect_hand = detect_hand == "enable"
        detect_body = detect_body == "enable"
        detect_face = detect_face == "enable"
        scale_stick_for_xinsr_cn = scale_stick_for_xinsr_cn == "enable"
        self.openpose_dicts = []
        def func(image, **kwargs):
            # Collect the per-frame keypoint dicts as a side effect so they can
            # be returned alongside the rendered pose images.
            pose_img, openpose_dict = model(image, **kwargs)
            self.openpose_dicts.append(openpose_dict)
            return pose_img

        out = common_annotator_call(func, image, include_hand=detect_hand, include_face=detect_face, include_body=detect_body, image_and_json=True, resolution=resolution, xinsr_stick_scaling=scale_stick_for_xinsr_cn)
        del model
        return {
            'ui': { "openpose_json": [json.dumps(self.openpose_dicts, indent=4)] },
            "result": (out, self.openpose_dicts)
        }

class AnimalPose_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            bbox_detector=INPUT.COMBO(
                ["yolox_l.torchscript.pt", "yolox_l.onnx", "yolo_nas_l_fp16.onnx", "yolo_nas_m_fp16.onnx", "yolo_nas_s_fp16.onnx"],
                default="yolox_l.torchscript.pt"
            ),
            pose_estimator=INPUT.COMBO(
                ["rtmpose-m_ap10k_256_bs5.torchscript.pt", "rtmpose-m_ap10k_256.onnx"],
                default="rtmpose-m_ap10k_256_bs5.torchscript.pt"
            ),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE", "POSE_KEYPOINT")
    FUNCTION = "estimate_pose"

    CATEGORY = "ControlNet Preprocessors/Faces and Poses Estimators"

    def estimate_pose(self, image, resolution=512, bbox_detector="yolox_l.onnx", pose_estimator="rtmpose-m_ap10k_256.onnx", **kwargs):
        if bbox_detector == "yolox_l.onnx":
            yolo_repo = DWPOSE_MODEL_NAME
        elif "yolox" in bbox_detector:
            yolo_repo = "hr16/yolox-onnx"
        elif "yolo_nas" in bbox_detector:
            yolo_repo = "hr16/yolo-nas-fp16"
        else:
            raise NotImplementedError(f"Download mechanism for {bbox_detector}")

        # Mirrors the DWPose branches above; the animal combo only offers
        # rtmpose files, so in practice the .onnx / .torchscript.pt branches
        # below select the repo.
        if pose_estimator == "dw-ll_ucoco_384.onnx":
            pose_repo = DWPOSE_MODEL_NAME
        elif pose_estimator.endswith(".onnx"):
            pose_repo = "hr16/UnJIT-DWPose"
        elif pose_estimator.endswith(".torchscript.pt"):
            pose_repo = "hr16/DWPose-TorchScript-BatchSize5"
        else:
            raise NotImplementedError(f"Download mechanism for {pose_estimator}")

        model = AnimalposeDetector.from_pretrained(
            pose_repo,
            yolo_repo,
            det_filename=bbox_detector, pose_filename=pose_estimator,
            torchscript_device=model_management.get_torch_device()
        )

        self.openpose_dicts = []
        def func(image, **kwargs):
            pose_img, openpose_dict = model(image, **kwargs)
            self.openpose_dicts.append(openpose_dict)
            return pose_img

        out = common_annotator_call(func, image, image_and_json=True, resolution=resolution)
        del model
        return {
            'ui': { "openpose_json": [json.dumps(self.openpose_dicts, indent=4)] },
            "result": (out, self.openpose_dicts)
        }

NODE_CLASS_MAPPINGS = {
    "DWPreprocessor": DWPose_Preprocessor,
    "AnimalPosePreprocessor": AnimalPose_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "DWPreprocessor": "DWPose Estimator",
    "AnimalPosePreprocessor": "AnimalPose Estimator (AP10K)"
}
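
Both nodes return the raw keypoint dictionaries alongside the rendered image. Each collected dict follows the OpenPose JSON convention, roughly as sketched below (x, y, c are placeholder coordinate/confidence values; this shape is taken from the OpenPose format, and the exact fields emitted by custom_controlnet_aux may differ slightly):

    {
        "canvas_width": 512,
        "canvas_height": 512,
        "people": [
            {
                "pose_keypoints_2d": [x, y, c, x, y, c, ...],
                "face_keypoints_2d": [...],
                "hand_left_keypoints_2d": [...],
                "hand_right_keypoints_2d": [...]
            }
        ]
    }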
    	
custom_nodes/comfyui_controlnet_aux/node_wrappers/hed.py
ADDED
@@ -0,0 +1,53 @@
from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT
import comfy.model_management as model_management

class HED_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            safe=INPUT.COMBO(["enable", "disable"]),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    def execute(self, image, resolution=512, **kwargs):
        from custom_controlnet_aux.hed import HEDdetector

        model = HEDdetector.from_pretrained().to(model_management.get_torch_device())
        # Fall back to the combo's first option ("enable") when the optional
        # input is absent, instead of raising KeyError.
        out = common_annotator_call(model, image, resolution=resolution, safe=kwargs.get("safe", "enable") == "enable")
        del model
        return (out, )

class Fake_Scribble_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            safe=INPUT.COMBO(["enable", "disable"]),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    def execute(self, image, resolution=512, **kwargs):
        from custom_controlnet_aux.hed import HEDdetector

        model = HEDdetector.from_pretrained().to(model_management.get_torch_device())
        out = common_annotator_call(model, image, resolution=resolution, scribble=True, safe=kwargs.get("safe", "enable") == "enable")
        del model
        return (out, )

NODE_CLASS_MAPPINGS = {
    "HEDPreprocessor": HED_Preprocessor,
    "FakeScribblePreprocessor": Fake_Scribble_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "HEDPreprocessor": "HED Soft-Edge Lines",
    "FakeScribblePreprocessor": "Fake Scribble Lines (aka scribble_hed)"
}
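
For reference, a minimal sketch of driving one of these nodes outside a graph (it assumes a working ComfyUI environment so the imports above resolve, and that the HED weights download on first use; the random tensor stands in for a real image batch in ComfyUI's [B, H, W, C] 0-1 float layout):

    import torch

    node = HED_Preprocessor()
    image = torch.rand(1, 512, 512, 3)   # synthetic input batch
    edges, = node.execute(image, resolution=512, safe="enable")
    print(edges.shape)                   # e.g. torch.Size([1, 512, 512, 3])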
    	
custom_nodes/comfyui_controlnet_aux/node_wrappers/inpaint.py
ADDED
@@ -0,0 +1,32 @@
import torch
from ..utils import INPUT

class InpaintPreprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return dict(
            required=dict(image=INPUT.IMAGE(), mask=INPUT.MASK()),
            optional=dict(black_pixel_for_xinsir_cn=INPUT.BOOLEAN(False))
        )
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "preprocess"

    CATEGORY = "ControlNet Preprocessors/others"

    def preprocess(self, image, mask, black_pixel_for_xinsir_cn=False):
        # Resize the mask to the image resolution, then broadcast to 3 channels.
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(image.shape[1], image.shape[2]), mode="bilinear")
        mask = mask.movedim(1, -1).expand((-1, -1, -1, 3))
        image = image.clone()
        # Standard inpaint ControlNets mark masked pixels with -1.0 (outside the
        # 0-1 image range); the xinsir flag uses plain black instead.
        if black_pixel_for_xinsir_cn:
            masked_pixel = 0.0
        else:
            masked_pixel = -1.0
        image[mask > 0.5] = masked_pixel
        return (image,)

NODE_CLASS_MAPPINGS = {
    "InpaintPreprocessor": InpaintPreprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "InpaintPreprocessor": "Inpaint Preprocessor"
}
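
A self-contained sketch of the masking convention above, using synthetic tensors in ComfyUI's [B, H, W, C] layout (assumes the module imports cleanly, since ..utils pulls in ComfyUI):

    import torch

    image = torch.rand(1, 4, 4, 3)       # [B, H, W, C] in 0..1
    mask = torch.zeros(1, 4, 4)
    mask[:, 1:3, 1:3] = 1.0              # mask out a 2x2 patch

    node = InpaintPreprocessor()
    out, = node.preprocess(image, mask)  # default: masked pixels become -1.0
    assert (out[0, 1:3, 1:3] == -1.0).all()

    out_xinsir, = node.preprocess(image, mask, black_pixel_for_xinsir_cn=True)
    assert (out_xinsir[0, 1:3, 1:3] == 0.0).all()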
    	
custom_nodes/comfyui_controlnet_aux/node_wrappers/leres.py
ADDED
@@ -0,0 +1,32 @@
from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT
import comfy.model_management as model_management

class LERES_Depth_Map_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            rm_nearest=INPUT.FLOAT(max=100.0),
            rm_background=INPUT.FLOAT(max=100.0),
            boost=INPUT.COMBO(["disable", "enable"]),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    def execute(self, image, rm_nearest=0, rm_background=0, resolution=512, boost="disable", **kwargs):
        from custom_controlnet_aux.leres import LeresDetector

        model = LeresDetector.from_pretrained().to(model_management.get_torch_device())
        # rm_nearest / rm_background feed the detector's thr_a / thr_b thresholds.
        out = common_annotator_call(model, image, resolution=resolution, thr_a=rm_nearest, thr_b=rm_background, boost=boost == "enable")
        del model
        return (out, )

NODE_CLASS_MAPPINGS = {
    "LeReS-DepthMapPreprocessor": LERES_Depth_Map_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "LeReS-DepthMapPreprocessor": "LeReS Depth Map (enable boost for leres++)"
}
    	
custom_nodes/comfyui_controlnet_aux/node_wrappers/lineart.py
ADDED
@@ -0,0 +1,30 @@
from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT
import comfy.model_management as model_management

class LineArt_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            coarse=INPUT.COMBO(["disable", "enable"]),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    def execute(self, image, resolution=512, **kwargs):
        from custom_controlnet_aux.lineart import LineartDetector

        model = LineartDetector.from_pretrained().to(model_management.get_torch_device())
        # Fall back to the combo's first option ("disable") when the optional
        # input is absent, instead of raising KeyError.
        out = common_annotator_call(model, image, resolution=resolution, coarse=kwargs.get("coarse", "disable") == "enable")
        del model
        return (out, )

NODE_CLASS_MAPPINGS = {
    "LineArtPreprocessor": LineArt_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "LineArtPreprocessor": "Realistic Lineart"
}
    	
custom_nodes/comfyui_controlnet_aux/node_wrappers/lineart_anime.py
ADDED
@@ -0,0 +1,27 @@
from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT
import comfy.model_management as model_management

class AnimeLineArt_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(resolution=INPUT.RESOLUTION())

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    def execute(self, image, resolution=512, **kwargs):
        from custom_controlnet_aux.lineart_anime import LineartAnimeDetector

        model = LineartAnimeDetector.from_pretrained().to(model_management.get_torch_device())
        out = common_annotator_call(model, image, resolution=resolution)
        del model
        return (out, )

NODE_CLASS_MAPPINGS = {
    "AnimeLineArtPreprocessor": AnimeLineArt_Preprocessor
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "AnimeLineArtPreprocessor": "Anime Lineart"
}
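
Every wrapper in node_wrappers/ repeats the same skeleton, so adding a new annotator mostly means filling in four slots: the input declaration, the lazy detector import, the common_annotator_call, and the two registration dicts. A hedged template (My_Preprocessor, MyDetector, and custom_controlnet_aux.my_detector are hypothetical placeholders, not part of the repo):

    from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT
    import comfy.model_management as model_management

    class My_Preprocessor:
        @classmethod
        def INPUT_TYPES(s):
            return define_preprocessor_inputs(resolution=INPUT.RESOLUTION())

        RETURN_TYPES = ("IMAGE",)
        FUNCTION = "execute"
        CATEGORY = "ControlNet Preprocessors/Line Extractors"

        def execute(self, image, resolution=512, **kwargs):
            # Import lazily, as the other wrappers do, so startup stays fast.
            from custom_controlnet_aux.my_detector import MyDetector  # hypothetical module
            model = MyDetector.from_pretrained().to(model_management.get_torch_device())
            out = common_annotator_call(model, image, resolution=resolution)
            del model  # release VRAM once the annotation is done
            return (out, )

    NODE_CLASS_MAPPINGS = {"MyPreprocessor": My_Preprocessor}
    NODE_DISPLAY_NAME_MAPPINGS = {"MyPreprocessor": "My Annotator"}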
