	Upload 130 files
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
- .gitattributes +3 -0
- LICENSE +201 -0
- audioldm/__init__.py +8 -0
- audioldm/__main__.py +183 -0
- audioldm/__pycache__/__init__.cpython-310.pyc +0 -0
- audioldm/__pycache__/ldm.cpython-310.pyc +0 -0
- audioldm/__pycache__/pipeline.cpython-310.pyc +0 -0
- audioldm/__pycache__/utils.cpython-310.pyc +0 -0
- audioldm/audio/__init__.py +2 -0
- audioldm/audio/__pycache__/__init__.cpython-310.pyc +0 -0
- audioldm/audio/__pycache__/audio_processing.cpython-310.pyc +0 -0
- audioldm/audio/__pycache__/stft.cpython-310.pyc +0 -0
- audioldm/audio/__pycache__/tools.cpython-310.pyc +0 -0
- audioldm/audio/audio_processing.py +100 -0
- audioldm/audio/stft.py +186 -0
- audioldm/audio/tools.py +85 -0
- audioldm/clap/__init__.py +0 -0
- audioldm/clap/encoders.py +170 -0
- audioldm/clap/open_clip/__init__.py +25 -0
- audioldm/clap/open_clip/bert.py +40 -0
- audioldm/clap/open_clip/bpe_simple_vocab_16e6.txt.gz +3 -0
- audioldm/clap/open_clip/factory.py +279 -0
- audioldm/clap/open_clip/feature_fusion.py +192 -0
- audioldm/clap/open_clip/htsat.py +1308 -0
- audioldm/clap/open_clip/linear_probe.py +66 -0
- audioldm/clap/open_clip/loss.py +398 -0
- audioldm/clap/open_clip/model.py +936 -0
- audioldm/clap/open_clip/model_configs/HTSAT-base.json +23 -0
- audioldm/clap/open_clip/model_configs/HTSAT-large.json +23 -0
- audioldm/clap/open_clip/model_configs/HTSAT-tiny-win-1536.json +23 -0
- audioldm/clap/open_clip/model_configs/HTSAT-tiny.json +23 -0
- audioldm/clap/open_clip/model_configs/PANN-10.json +23 -0
- audioldm/clap/open_clip/model_configs/PANN-14-fmax-18k.json +23 -0
- audioldm/clap/open_clip/model_configs/PANN-14-fmax-8k-20s.json +23 -0
- audioldm/clap/open_clip/model_configs/PANN-14-tiny-transformer.json +23 -0
- audioldm/clap/open_clip/model_configs/PANN-14-win-1536.json +23 -0
- audioldm/clap/open_clip/model_configs/PANN-14.json +23 -0
- audioldm/clap/open_clip/model_configs/PANN-6.json +23 -0
- audioldm/clap/open_clip/model_configs/RN101-quickgelu.json +22 -0
- audioldm/clap/open_clip/model_configs/RN101.json +21 -0
- audioldm/clap/open_clip/model_configs/RN50-quickgelu.json +22 -0
- audioldm/clap/open_clip/model_configs/RN50.json +21 -0
- audioldm/clap/open_clip/model_configs/RN50x16.json +21 -0
- audioldm/clap/open_clip/model_configs/RN50x4.json +21 -0
- audioldm/clap/open_clip/model_configs/ViT-B-16.json +16 -0
- audioldm/clap/open_clip/model_configs/ViT-B-32-quickgelu.json +17 -0
- audioldm/clap/open_clip/model_configs/ViT-B-32.json +16 -0
- audioldm/clap/open_clip/model_configs/ViT-L-14.json +16 -0
- audioldm/clap/open_clip/openai.py +159 -0
- audioldm/clap/open_clip/pann_model.py +704 -0
    	
.gitattributes CHANGED

@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+data/-26aVYRtEAc_000030.mp4 filter=lfs diff=lfs merge=lfs -text
+data/-yoaSondvkw_000071.mp4 filter=lfs diff=lfs merge=lfs -text
+data/0Bp8c3PfAAA_000053.mp4 filter=lfs diff=lfs merge=lfs -text
    	
LICENSE ADDED

@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
    	
audioldm/__init__.py ADDED

@@ -0,0 +1,8 @@
+from .ldm import LatentDiffusion
+from .utils import seed_everything, save_wave, get_time, get_duration
+from .pipeline import *
+
+
+
+
+
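Not part of the commit: a quick sketch of what these top-level re-exports make available to callers. text_to_audio and build_model, which __main__.py below imports from the package root, come in through the star-import from .pipeline; the exact signature of seed_everything is not shown in this view and is assumed here.

# Illustrative only; assumes seed_everything takes a single integer seed.
import audioldm

audioldm.seed_everything(42)   # fix randomness before generation (assumed signature)
print(audioldm.get_time())     # timestamp helper that __main__.py uses for output names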
    	
audioldm/__main__.py ADDED

@@ -0,0 +1,183 @@
+#!/usr/bin/python3
+import os
+from audioldm import text_to_audio, style_transfer, build_model, save_wave, get_time, round_up_duration, get_duration
+import argparse
+
+CACHE_DIR = os.getenv(
+    "AUDIOLDM_CACHE_DIR",
+    os.path.join(os.path.expanduser("~"), ".cache/audioldm"))
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument(
+    "--mode",
+    type=str,
+    required=False,
+    default="generation",
+    help="generation: text-to-audio generation; transfer: style transfer",
+    choices=["generation", "transfer"]
+)
+
+parser.add_argument(
+    "-t",
+    "--text",
+    type=str,
+    required=False,
+    default="",
+    help="Text prompt to the model for audio generation",
+)
+
+parser.add_argument(
+    "-f",
+    "--file_path",
+    type=str,
+    required=False,
+    default=None,
+    help="(--mode transfer): Original audio file for style transfer; Or (--mode generation): the guidance audio file for generating simialr audio",
+)
+
+parser.add_argument(
+    "--transfer_strength",
+    type=float,
+    required=False,
+    default=0.5,
+    help="A value between 0 and 1. 0 means original audio without transfer, 1 means completely transfer to the audio indicated by text",
+)
+
+parser.add_argument(
+    "-s",
+    "--save_path",
+    type=str,
+    required=False,
+    help="The path to save model output",
+    default="./output",
+)
+
+parser.add_argument(
+    "--model_name",
+    type=str,
+    required=False,
+    help="The checkpoint you gonna use",
+    default="audioldm-s-full",
+    choices=["audioldm-s-full", "audioldm-l-full", "audioldm-s-full-v2"]
+)
+
+parser.add_argument(
+    "-ckpt",
+    "--ckpt_path",
+    type=str,
+    required=False,
+    help="The path to the pretrained .ckpt model",
+    default=None,
+)
+
+parser.add_argument(
+    "-b",
+    "--batchsize",
+    type=int,
+    required=False,
+    default=1,
+    help="Generate how many samples at the same time",
+)
+
+parser.add_argument(
+    "--ddim_steps",
+    type=int,
+    required=False,
+    default=200,
+    help="The sampling step for DDIM",
+)
+
+parser.add_argument(
+    "-gs",
+    "--guidance_scale",
+    type=float,
+    required=False,
+    default=2.5,
+    help="Guidance scale (Large => better quality and relavancy to text; Small => better diversity)",
+)
+
+parser.add_argument(
+    "-dur",
+    "--duration",
+    type=float,
+    required=False,
+    default=10.0,
+    help="The duration of the samples",
+)
+
+parser.add_argument(
+    "-n",
+    "--n_candidate_gen_per_text",
+    type=int,
+    required=False,
+    default=3,
+    help="Automatic quality control. This number control the number of candidates (e.g., generate three audios and choose the best to show you). A Larger value usually lead to better quality with heavier computation",
+)
+
+parser.add_argument(
+    "--seed",
+    type=int,
+    required=False,
+    default=42,
+    help="Change this value (any integer number) will lead to a different generation result.",
+)
+
+args = parser.parse_args()
+
+if(args.ckpt_path is not None):
+    print("Warning: ckpt_path has no effect after version 0.0.20.")
+
+assert args.duration % 2.5 == 0, "Duration must be a multiple of 2.5"
+
+mode = args.mode
+if(mode == "generation" and args.file_path is not None):
+    mode = "generation_audio_to_audio"
+    if(len(args.text) > 0):
+        print("Warning: You have specified the --file_path. --text will be ignored")
+        args.text = ""
+
+save_path = os.path.join(args.save_path, mode)
+
+if(args.file_path is not None):
+    save_path = os.path.join(save_path, os.path.basename(args.file_path.split(".")[0]))
+
+text = args.text
+random_seed = args.seed
+duration = args.duration
+guidance_scale = args.guidance_scale
+n_candidate_gen_per_text = args.n_candidate_gen_per_text
+
+os.makedirs(save_path, exist_ok=True)
+audioldm = build_model(model_name=args.model_name)
+
+if(args.mode == "generation"):
+    waveform = text_to_audio(
+        audioldm,
+        text,
+        args.file_path,
+        random_seed,
+        duration=duration,
+        guidance_scale=guidance_scale,
+        ddim_steps=args.ddim_steps,
+        n_candidate_gen_per_text=n_candidate_gen_per_text,
+        batchsize=args.batchsize,
+    )
+
+elif(args.mode == "transfer"):
+    assert args.file_path is not None
+    assert os.path.exists(args.file_path), "The original audio file \'%s\' for style transfer does not exist." % args.file_path
+    waveform = style_transfer(
+        audioldm,
+        text,
+        args.file_path,
+        args.transfer_strength,
+        random_seed,
+        duration=duration,
+        guidance_scale=guidance_scale,
+        ddim_steps=args.ddim_steps,
+        batchsize=args.batchsize,
+    )
+    waveform = waveform[:,None,:]
+
+save_wave(waveform, save_path, name="%s_%s" % (get_time(), text))
    	
audioldm/__pycache__/__init__.cpython-310.pyc ADDED (binary file, 317 Bytes)

audioldm/__pycache__/ldm.cpython-310.pyc ADDED (binary file, 16.1 kB)

audioldm/__pycache__/pipeline.cpython-310.pyc ADDED (binary file, 6.63 kB)

audioldm/__pycache__/utils.cpython-310.pyc ADDED (binary file, 8.01 kB)
    	
audioldm/audio/__init__.py ADDED

@@ -0,0 +1,2 @@
+from .tools import wav_to_fbank, read_wav_file
+from .stft import TacotronSTFT
    	
audioldm/audio/__pycache__/__init__.cpython-310.pyc ADDED (binary file, 255 Bytes)

audioldm/audio/__pycache__/audio_processing.cpython-310.pyc ADDED (binary file, 2.78 kB)

audioldm/audio/__pycache__/stft.cpython-310.pyc ADDED (binary file, 4.98 kB)

audioldm/audio/__pycache__/tools.cpython-310.pyc ADDED (binary file, 2.19 kB)
    	
audioldm/audio/audio_processing.py ADDED

@@ -0,0 +1,100 @@
+import torch
+import numpy as np
+import librosa.util as librosa_util
+from scipy.signal import get_window
+
+
+def window_sumsquare(
+    window,
+    n_frames,
+    hop_length,
+    win_length,
+    n_fft,
+    dtype=np.float32,
+    norm=None,
+):
+    """
+    # from librosa 0.6
+    Compute the sum-square envelope of a window function at a given hop length.
+
+    This is used to estimate modulation effects induced by windowing
+    observations in short-time fourier transforms.
+
+    Parameters
+    ----------
+    window : string, tuple, number, callable, or list-like
+        Window specification, as in `get_window`
+
+    n_frames : int > 0
+        The number of analysis frames
+
+    hop_length : int > 0
+        The number of samples to advance between frames
+
+    win_length : [optional]
+        The length of the window function.  By default, this matches `n_fft`.
+
+    n_fft : int > 0
+        The length of each analysis frame.
+
+    dtype : np.dtype
+        The data type of the output
+
+    Returns
+    -------
+    wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
+        The sum-squared envelope of the window function
+    """
+    if win_length is None:
+        win_length = n_fft
+
+    n = n_fft + hop_length * (n_frames - 1)
+    x = np.zeros(n, dtype=dtype)
+
+    # Compute the squared window at the desired length
+    win_sq = get_window(window, win_length, fftbins=True)
+    win_sq = librosa_util.normalize(win_sq, norm=norm) ** 2
+    win_sq = librosa_util.pad_center(win_sq, n_fft)
+
+    # Fill the envelope
+    for i in range(n_frames):
+        sample = i * hop_length
+        x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))]
+    return x
+
+
+def griffin_lim(magnitudes, stft_fn, n_iters=30):
+    """
+    PARAMS
+    ------
+    magnitudes: spectrogram magnitudes
+    stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
+    """
+
+    angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
+    angles = angles.astype(np.float32)
+    angles = torch.autograd.Variable(torch.from_numpy(angles))
+    signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
+
+    for i in range(n_iters):
+        _, angles = stft_fn.transform(signal)
+        signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
+    return signal
+
+
+def dynamic_range_compression(x, normalize_fun=torch.log, C=1, clip_val=1e-5):
+    """
+    PARAMS
+    ------
+    C: compression factor
+    """
+    return normalize_fun(torch.clamp(x, min=clip_val) * C)
+
+
+def dynamic_range_decompression(x, C=1):
+    """
+    PARAMS
+    ------
+    C: compression factor used to compress
+    """
+    return torch.exp(x) / C
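A small sanity check (not part of the commit) of the two dynamic-range helpers above: with the default C=1 and inputs above clip_val, dynamic_range_decompression exactly undoes dynamic_range_compression, since it is just exp applied to the clamped log.

# Round-trip check of the compression pair defined in audio_processing.py.
import torch
from audioldm.audio.audio_processing import (
    dynamic_range_compression,
    dynamic_range_decompression,
)

mel = torch.rand(1, 64, 100) + 0.1               # toy mel magnitudes, all above clip_val=1e-5
log_mel = dynamic_range_compression(mel)         # log-compressed dynamic range
restored = dynamic_range_decompression(log_mel)  # exp undoes the log for unclipped values
print(torch.allclose(mel, restored, atol=1e-6))  # True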
    	
audioldm/audio/stft.py ADDED

@@ -0,0 +1,186 @@
+import torch
+import torch.nn.functional as F
+import numpy as np
+from scipy.signal import get_window
+from librosa.util import pad_center, tiny
+from librosa.filters import mel as librosa_mel_fn
+
+from audioldm.audio.audio_processing import (
+    dynamic_range_compression,
+    dynamic_range_decompression,
+    window_sumsquare,
+)
+
+
+class STFT(torch.nn.Module):
+    """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
+
+    def __init__(self, filter_length, hop_length, win_length, window="hann"):
+        super(STFT, self).__init__()
+        self.filter_length = filter_length
+        self.hop_length = hop_length
+        self.win_length = win_length
+        self.window = window
+        self.forward_transform = None
+        scale = self.filter_length / self.hop_length
+        fourier_basis = np.fft.fft(np.eye(self.filter_length))
+
+        cutoff = int((self.filter_length / 2 + 1))
+        fourier_basis = np.vstack(
+            [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]
+        )
+
+        forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
+        inverse_basis = torch.FloatTensor(
+            np.linalg.pinv(scale * fourier_basis).T[:, None, :]
+        )
+
+        if window is not None:
+            assert filter_length >= win_length
+            # get window and zero center pad it to filter_length
+            fft_window = get_window(window, win_length, fftbins=True)
+            fft_window = pad_center(fft_window, filter_length)
+            fft_window = torch.from_numpy(fft_window).float()
+
+            # window the bases
+            forward_basis *= fft_window
+            inverse_basis *= fft_window
+
+        self.register_buffer("forward_basis", forward_basis.float())
+        self.register_buffer("inverse_basis", inverse_basis.float())
+
+    def transform(self, input_data):
+        device = self.forward_basis.device
+        input_data = input_data.to(device)
+
+        num_batches = input_data.size(0)
+        num_samples = input_data.size(1)
+
+        self.num_samples = num_samples
+
+        # similar to librosa, reflect-pad the input
+        input_data = input_data.view(num_batches, 1, num_samples)
+        input_data = F.pad(
+            input_data.unsqueeze(1),
+            (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
+            mode="reflect",
+        )
+        input_data = input_data.squeeze(1)
+
+        forward_transform = F.conv1d(
+            input_data,
+            torch.autograd.Variable(self.forward_basis, requires_grad=False),
+            stride=self.hop_length,
+            padding=0,
+        )#.cpu()
+
+        cutoff = int((self.filter_length / 2) + 1)
+        real_part = forward_transform[:, :cutoff, :]
+        imag_part = forward_transform[:, cutoff:, :]
+
+        magnitude = torch.sqrt(real_part**2 + imag_part**2)
+        phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data))
+
+        return magnitude, phase
+
+    def inverse(self, magnitude, phase):
+        device = self.forward_basis.device
+        magnitude, phase = magnitude.to(device), phase.to(device)
+
+        recombine_magnitude_phase = torch.cat(
+            [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
+        )
+
+        inverse_transform = F.conv_transpose1d(
+            recombine_magnitude_phase,
+            torch.autograd.Variable(self.inverse_basis, requires_grad=False),
+            stride=self.hop_length,
+            padding=0,
+        )
+
+        if self.window is not None:
+            window_sum = window_sumsquare(
+                self.window,
+                magnitude.size(-1),
+                hop_length=self.hop_length,
+                win_length=self.win_length,
+                n_fft=self.filter_length,
+                dtype=np.float32,
+            )
+            # remove modulation effects
+            approx_nonzero_indices = torch.from_numpy(
+                np.where(window_sum > tiny(window_sum))[0]
+            )
+            window_sum = torch.autograd.Variable(
+                torch.from_numpy(window_sum), requires_grad=False
+            )
| 117 | 
            +
                        window_sum = window_sum
         | 
| 118 | 
            +
                        inverse_transform[:, :, approx_nonzero_indices] /= window_sum[
         | 
| 119 | 
            +
                            approx_nonzero_indices
         | 
| 120 | 
            +
                        ]
         | 
| 121 | 
            +
             | 
| 122 | 
            +
                        # scale by hop ratio
         | 
| 123 | 
            +
                        inverse_transform *= float(self.filter_length) / self.hop_length
         | 
| 124 | 
            +
             | 
| 125 | 
            +
                    inverse_transform = inverse_transform[:, :, int(self.filter_length / 2) :]
         | 
| 126 | 
            +
                    inverse_transform = inverse_transform[:, :, : -int(self.filter_length / 2) :]
         | 
| 127 | 
            +
             | 
| 128 | 
            +
                    return inverse_transform
         | 
| 129 | 
            +
             | 
| 130 | 
            +
                def forward(self, input_data):
         | 
| 131 | 
            +
                    self.magnitude, self.phase = self.transform(input_data)
         | 
| 132 | 
            +
                    reconstruction = self.inverse(self.magnitude, self.phase)
         | 
| 133 | 
            +
                    return reconstruction
         | 
| 134 | 
            +
             | 
| 135 | 
            +
             | 
| 136 | 
            +
            class TacotronSTFT(torch.nn.Module):
         | 
| 137 | 
            +
                def __init__(
         | 
| 138 | 
            +
                    self,
         | 
| 139 | 
            +
                    filter_length,
         | 
| 140 | 
            +
                    hop_length,
         | 
| 141 | 
            +
                    win_length,
         | 
| 142 | 
            +
                    n_mel_channels,
         | 
| 143 | 
            +
                    sampling_rate,
         | 
| 144 | 
            +
                    mel_fmin,
         | 
| 145 | 
            +
                    mel_fmax,
         | 
| 146 | 
            +
                ):
         | 
| 147 | 
            +
                    super(TacotronSTFT, self).__init__()
         | 
| 148 | 
            +
                    self.n_mel_channels = n_mel_channels
         | 
| 149 | 
            +
                    self.sampling_rate = sampling_rate
         | 
| 150 | 
            +
                    self.stft_fn = STFT(filter_length, hop_length, win_length)
         | 
| 151 | 
            +
                    mel_basis = librosa_mel_fn(
         | 
| 152 | 
            +
                        sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax
         | 
| 153 | 
            +
                    )
         | 
| 154 | 
            +
                    mel_basis = torch.from_numpy(mel_basis).float()
         | 
| 155 | 
            +
                    self.register_buffer("mel_basis", mel_basis)
         | 
| 156 | 
            +
             | 
| 157 | 
            +
                def spectral_normalize(self, magnitudes, normalize_fun):
         | 
| 158 | 
            +
                    output = dynamic_range_compression(magnitudes, normalize_fun)
         | 
| 159 | 
            +
                    return output
         | 
| 160 | 
            +
             | 
| 161 | 
            +
                def spectral_de_normalize(self, magnitudes):
         | 
| 162 | 
            +
                    output = dynamic_range_decompression(magnitudes)
         | 
| 163 | 
            +
                    return output
         | 
| 164 | 
            +
             | 
| 165 | 
            +
                def mel_spectrogram(self, y, normalize_fun=torch.log):
         | 
| 166 | 
            +
                    """Computes mel-spectrograms from a batch of waves
         | 
| 167 | 
            +
                    PARAMS
         | 
| 168 | 
            +
                    ------
         | 
| 169 | 
            +
                    y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
         | 
| 170 | 
            +
             | 
| 171 | 
            +
                    RETURNS
         | 
| 172 | 
            +
                    -------
         | 
| 173 | 
            +
                    mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
         | 
| 174 | 
            +
                    """
         | 
| 175 | 
            +
                    assert torch.min(y.data) >= -1, torch.min(y.data)
         | 
| 176 | 
            +
                    assert torch.max(y.data) <= 1, torch.max(y.data)
         | 
| 177 | 
            +
             | 
| 178 | 
            +
                    magnitudes, phases = self.stft_fn.transform(y)
         | 
| 179 | 
            +
                    magnitudes = magnitudes.data
         | 
| 180 | 
            +
                    mel_output = torch.matmul(self.mel_basis, magnitudes)
         | 
| 181 | 
            +
                    mel_output = self.spectral_normalize(mel_output, normalize_fun)
         | 
| 182 | 
            +
                    energy = torch.norm(magnitudes, dim=1)
         | 
| 183 | 
            +
             | 
| 184 | 
            +
                    log_magnitudes = self.spectral_normalize(magnitudes, normalize_fun)
         | 
| 185 | 
            +
             | 
| 186 | 
            +
                    return mel_output, log_magnitudes, energy
         | 
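For orientation, a minimal usage sketch of the TacotronSTFT front end defined above; the filter/hop/mel settings below are assumptions chosen for a 16 kHz pipeline, not values taken from this file.

# Hypothetical usage sketch; the STFT/mel settings are assumptions for a
# 16 kHz pipeline and are not defined in stft.py itself.
import torch

fn_STFT = TacotronSTFT(
    filter_length=1024,
    hop_length=160,
    win_length=1024,
    n_mel_channels=64,
    sampling_rate=16000,
    mel_fmin=0,
    mel_fmax=8000,
)
waveform = torch.rand(1, 16000 * 10) * 2 - 1  # 10 s of audio in [-1, 1]
mel, log_magnitudes, energy = fn_STFT.mel_spectrogram(waveform)
# mel: (1, n_mel_channels, T); log_magnitudes: (1, filter_length // 2 + 1, T)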
    	
        audioldm/audio/tools.py
    ADDED
    
import torch
import numpy as np
import torchaudio


def get_mel_from_wav(audio, _stft):
    audio = torch.clip(torch.FloatTensor(audio).unsqueeze(0), -1, 1)
    audio = torch.autograd.Variable(audio, requires_grad=False)
    melspec, log_magnitudes_stft, energy = _stft.mel_spectrogram(audio)
    melspec = torch.squeeze(melspec, 0).numpy().astype(np.float32)
    log_magnitudes_stft = (
        torch.squeeze(log_magnitudes_stft, 0).numpy().astype(np.float32)
    )
    energy = torch.squeeze(energy, 0).numpy().astype(np.float32)
    return melspec, log_magnitudes_stft, energy


def _pad_spec(fbank, target_length=1024):
    n_frames = fbank.shape[0]
    p = target_length - n_frames
    # cut and pad
    if p > 0:
        m = torch.nn.ZeroPad2d((0, 0, 0, p))
        fbank = m(fbank)
    elif p < 0:
        fbank = fbank[0:target_length, :]

    if fbank.size(-1) % 2 != 0:
        fbank = fbank[..., :-1]

    return fbank


def pad_wav(waveform, segment_length):
    waveform_length = waveform.shape[-1]
    assert waveform_length > 100, "Waveform is too short, %s" % waveform_length
    if segment_length is None or waveform_length == segment_length:
        return waveform
    elif waveform_length > segment_length:
        return waveform[:segment_length]
    elif waveform_length < segment_length:
        temp_wav = np.zeros((1, segment_length))
        temp_wav[:, :waveform_length] = waveform
    return temp_wav


def normalize_wav(waveform):
    waveform = waveform - np.mean(waveform)
    waveform = waveform / (np.max(np.abs(waveform)) + 1e-8)
    return waveform * 0.5


def read_wav_file(filename, segment_length):
    # waveform, sr = librosa.load(filename, sr=None, mono=True)  # 4 times slower
    waveform, sr = torchaudio.load(filename)  # Faster!!!
    waveform = torchaudio.functional.resample(waveform, orig_freq=sr, new_freq=16000)
    waveform = waveform.numpy()[0, ...]
    waveform = normalize_wav(waveform)
    waveform = waveform[None, ...]
    waveform = pad_wav(waveform, segment_length)

    waveform = waveform / np.max(np.abs(waveform))
    waveform = 0.5 * waveform

    return waveform


def wav_to_fbank(filename, target_length=1024, fn_STFT=None):
    assert fn_STFT is not None

    # mixup
    waveform = read_wav_file(filename, target_length * 160)  # hop size is 160

    waveform = waveform[0, ...]
    waveform = torch.FloatTensor(waveform)

    fbank, log_magnitudes_stft, energy = get_mel_from_wav(waveform, fn_STFT)

    fbank = torch.FloatTensor(fbank.T)
    log_magnitudes_stft = torch.FloatTensor(log_magnitudes_stft.T)

    fbank, log_magnitudes_stft = _pad_spec(fbank, target_length), _pad_spec(
        log_magnitudes_stft, target_length
    )

    return fbank, log_magnitudes_stft, waveform
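These helpers compose into the mel front end: with the 160-sample hop noted in wav_to_fbank and 16 kHz audio, target_length=1024 frames covers roughly 10.24 s per clip. A hedged sketch follows; the TacotronSTFT settings and "example.wav" are placeholders, not values defined in this file.

# Hypothetical example; STFT settings and the input path are placeholders.
# 1024 frames * 160 samples/frame / 16000 Hz ~= 10.24 s of audio.
from audioldm.audio.stft import TacotronSTFT
from audioldm.audio.tools import wav_to_fbank

fn_STFT = TacotronSTFT(1024, 160, 1024, 64, 16000, 0, 8000)
fbank, log_magnitudes, waveform = wav_to_fbank(
    "example.wav", target_length=1024, fn_STFT=fn_STFT
)
# fbank: (target_length, n_mel_channels), e.g. (1024, 64)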
    	
        audioldm/clap/__init__.py
    ADDED
    
File without changes
    	
        audioldm/clap/encoders.py
    ADDED
    
import torch
import torch.nn as nn
from audioldm.clap.open_clip import create_model
from audioldm.clap.training.data import get_audio_features
import torchaudio
from transformers import RobertaTokenizer
import torch.nn.functional as F


class CLAPAudioEmbeddingClassifierFreev2(nn.Module):
    def __init__(
        self,
        pretrained_path="",
        key="class",
        sampling_rate=16000,
        embed_mode="audio",
        amodel="HTSAT-tiny",
        unconditional_prob=0.1,
        random_mute=False,
        max_random_mute_portion=0.5,
        training_mode=True,
    ):
        super().__init__()

        self.key = key
        self.device = "cpu"
        self.precision = "fp32"
        self.amodel = amodel  # or 'PANN-14'
        self.tmodel = "roberta"  # the best text encoder in our training
        self.enable_fusion = False  # False if you do not want to use the fusion model
        self.fusion_type = "aff_2d"
        self.pretrained = pretrained_path
        self.embed_mode = embed_mode
        self.embed_mode_orig = embed_mode
        self.sampling_rate = sampling_rate
        self.unconditional_prob = unconditional_prob
        self.random_mute = random_mute
        self.tokenize = RobertaTokenizer.from_pretrained("roberta-base")
        self.max_random_mute_portion = max_random_mute_portion
        self.training_mode = training_mode
        self.model, self.model_cfg = create_model(
            self.amodel,
            self.tmodel,
            self.pretrained,
            precision=self.precision,
            device=self.device,
            enable_fusion=self.enable_fusion,
            fusion_type=self.fusion_type,
        )
        for p in self.model.parameters():
            p.requires_grad = False

        self.model.eval()

    def get_unconditional_condition(self, batchsize):
        self.unconditional_token = self.model.get_text_embedding(
            self.tokenizer(["", ""])
        )[0:1]
        return torch.cat([self.unconditional_token.unsqueeze(0)] * batchsize, dim=0)

    def batch_to_list(self, batch):
        ret = []
        for i in range(batch.size(0)):
            ret.append(batch[i])
        return ret

    def make_decision(self, probability):
        if float(torch.rand(1)) < probability:
            return True
        else:
            return False

    def random_uniform(self, start, end):
        val = torch.rand(1).item()
        return start + (end - start) * val

    def _random_mute(self, waveform):
        # waveform: [bs, t-steps]
        t_steps = waveform.size(-1)
        for i in range(waveform.size(0)):
            mute_size = int(
                self.random_uniform(0, end=int(t_steps * self.max_random_mute_portion))
            )
            mute_start = int(self.random_uniform(0, t_steps - mute_size))
            waveform[i, mute_start : mute_start + mute_size] = 0
        return waveform

    def cos_similarity(self, waveform, text):
        # waveform: [bs, t_steps]
        with torch.no_grad():
            self.embed_mode = "audio"
            audio_emb = self(waveform.cuda())
            self.embed_mode = "text"
            text_emb = self(text)
            similarity = F.cosine_similarity(audio_emb, text_emb, dim=2)
            return similarity.squeeze()

    def forward(self, batch, key=None):
        # If you want this conditioner to be unconditional, set self.unconditional_prob = 1.0
        # If you want this conditioner to be fully conditional, set self.unconditional_prob = 0.0
        if self.model.training == True and not self.training_mode:
            print(
                "The pretrained CLAP model should always be in eval mode. Reloading model just in case you change the parameters."
            )
            self.model, self.model_cfg = create_model(
                self.amodel,
                self.tmodel,
                self.pretrained,
                precision=self.precision,
                device="cuda",
                enable_fusion=self.enable_fusion,
                fusion_type=self.fusion_type,
            )
            for p in self.model.parameters():
                p.requires_grad = False
            self.model.eval()

        # the 'fusion' truncate mode can be changed to 'rand_trunc' if run in unfusion mode
        if self.embed_mode == "audio":
            with torch.no_grad():
                audio_dict_list = []
                assert (
                    self.sampling_rate == 16000
                ), "We only support 16000 sampling rate"
                if self.random_mute:
                    batch = self._random_mute(batch)
                # batch: [bs, 1, t-samples]
                batch = torchaudio.functional.resample(
                    batch, orig_freq=self.sampling_rate, new_freq=48000
                )
                for waveform in self.batch_to_list(batch):
                    audio_dict = {}
                    audio_dict = get_audio_features(
                        audio_dict,
                        waveform,
                        480000,
                        data_truncating="fusion",
                        data_filling="repeatpad",
                        audio_cfg=self.model_cfg["audio_cfg"],
                    )
                    audio_dict_list.append(audio_dict)
                # [bs, 512]
                embed = self.model.get_audio_embedding(audio_dict_list)
        elif self.embed_mode == "text":
            with torch.no_grad():
                # the 'fusion' truncate mode can be changed to 'rand_trunc' if run in unfusion mode
                text_data = self.tokenizer(batch)
                embed = self.model.get_text_embedding(text_data)

        embed = embed.unsqueeze(1)
        self.unconditional_token = self.model.get_text_embedding(
            self.tokenizer(["", ""])
        )[0:1]

        for i in range(embed.size(0)):
            if self.make_decision(self.unconditional_prob):
                embed[i] = self.unconditional_token

        # [bs, 1, 512]
        return embed.detach()

    def tokenizer(self, text):
        result = self.tokenize(
            text,
            padding="max_length",
            truncation=True,
            max_length=512,
            return_tensors="pt",
        )
        return {k: v.squeeze(0) for k, v in result.items()}
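The wrapper above is driven either in audio or text mode through forward(); a hedged sketch of text conditioning follows. The checkpoint path is a placeholder, and the 512-dimensional output follows the shape comment in forward().

# Hypothetical usage; the checkpoint path is a placeholder, not part of this diff.
import torch
from audioldm.clap.encoders import CLAPAudioEmbeddingClassifierFreev2

clap = CLAPAudioEmbeddingClassifierFreev2(
    pretrained_path="/path/to/clap_checkpoint.pt",  # placeholder
    sampling_rate=16000,
    embed_mode="text",
    unconditional_prob=0.0,  # keep every caption conditional in this sketch
)
with torch.no_grad():
    text_embed = clap(["a dog barking in the distance"])  # -> [1, 1, 512]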
    	
        audioldm/clap/open_clip/__init__.py
    ADDED
    
from .factory import (
    list_models,
    create_model,
    create_model_and_transforms,
    add_model_config,
)
from .loss import ClipLoss, gather_features, LPLoss, lp_gather_features, LPMetrics
from .model import (
    CLAP,
    CLAPTextCfg,
    CLAPVisionCfg,
    CLAPAudioCfp,
    convert_weights_to_fp16,
    trace_model,
)
from .openai import load_openai_model, list_openai_models
from .pretrained import (
    list_pretrained,
    list_pretrained_tag_models,
    list_pretrained_model_tags,
    get_pretrained_url,
    download_pretrained,
)
from .tokenizer import SimpleTokenizer, tokenize
from .transform import image_transform
    	
        audioldm/clap/open_clip/bert.py
    ADDED
    
from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")
text = "Replace me by any text you'd like."


def bert_embeddings(text):
    # text = "Replace me by any text you'd like."
    encoded_input = tokenizer(text, return_tensors="pt")
    output = model(**encoded_input)
    return output


from transformers import RobertaTokenizer, RobertaModel

tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
model = RobertaModel.from_pretrained("roberta-base")
text = "Replace me by any text you'd like."


def Roberta_embeddings(text):
    # text = "Replace me by any text you'd like."
    encoded_input = tokenizer(text, return_tensors="pt")
    output = model(**encoded_input)
    return output


from transformers import BartTokenizer, BartModel

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
model = BartModel.from_pretrained("facebook/bart-base")
text = "Replace me by any text you'd like."


def bart_embeddings(text):
    # text = "Replace me by any text you'd like."
    encoded_input = tokenizer(text, return_tensors="pt")
    output = model(**encoded_input)
    return output
    	
        audioldm/clap/open_clip/bpe_simple_vocab_16e6.txt.gz
    ADDED
    
version https://git-lfs.github.com/spec/v1
oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
size 1356917
    	
        audioldm/clap/open_clip/factory.py
    ADDED
    
|  | |
| 1 | 
            +
            import json
         | 
| 2 | 
            +
            import logging
         | 
| 3 | 
            +
            import os
         | 
| 4 | 
            +
            import pathlib
         | 
| 5 | 
            +
            import re
         | 
| 6 | 
            +
            from copy import deepcopy
         | 
| 7 | 
            +
            from pathlib import Path
         | 
| 8 | 
            +
             | 
| 9 | 
            +
            import torch
         | 
| 10 | 
            +
             | 
| 11 | 
            +
            from .model import CLAP, convert_weights_to_fp16
         | 
| 12 | 
            +
            from .openai import load_openai_model
         | 
| 13 | 
            +
            from .pretrained import get_pretrained_url, download_pretrained
         | 
| 14 | 
            +
            from .transform import image_transform
         | 
| 15 | 
            +
             | 
| 16 | 
            +
            _MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
         | 
| 17 | 
            +
            _MODEL_CONFIGS = {}  # directory (model_name: config) of model architecture configs
         | 
| 18 | 
            +
            CACHE_DIR = os.getenv("AUDIOLDM_CACHE_DIR", "~/.cache/audioldm")
         | 
| 19 | 
            +
             | 
| 20 | 
            +
             | 
| 21 | 
            +
             | 
| 22 | 
            +
            def _natural_key(string_):
         | 
| 23 | 
            +
                return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
         | 
| 24 | 
            +
             | 
| 25 | 
            +
             | 
| 26 | 
            +
            def _rescan_model_configs():
         | 
| 27 | 
            +
                global _MODEL_CONFIGS
         | 
| 28 | 
            +
             | 
| 29 | 
            +
                config_ext = (".json",)
         | 
| 30 | 
            +
                config_files = []
         | 
| 31 | 
            +
                for config_path in _MODEL_CONFIG_PATHS:
         | 
| 32 | 
            +
                    if config_path.is_file() and config_path.suffix in config_ext:
         | 
| 33 | 
            +
                        config_files.append(config_path)
         | 
| 34 | 
            +
                    elif config_path.is_dir():
         | 
| 35 | 
            +
                        for ext in config_ext:
         | 
| 36 | 
            +
                            config_files.extend(config_path.glob(f"*{ext}"))
         | 
| 37 | 
            +
             | 
| 38 | 
            +
                for cf in config_files:
         | 
| 39 | 
            +
                    if os.path.basename(cf)[0] == ".":
         | 
| 40 | 
            +
                        continue  # Ignore hidden files
         | 
| 41 | 
            +
             | 
| 42 | 
            +
                    with open(cf, "r") as f:
         | 
| 43 | 
            +
                        model_cfg = json.load(f)
         | 
| 44 | 
            +
                        if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
         | 
| 45 | 
            +
                            _MODEL_CONFIGS[cf.stem] = model_cfg
         | 
| 46 | 
            +
             | 
| 47 | 
            +
                _MODEL_CONFIGS = {
         | 
| 48 | 
            +
                    k: v
         | 
| 49 | 
            +
                    for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
         | 
| 50 | 
            +
                }
         | 
| 51 | 
            +
             | 
| 52 | 
            +
             | 
| 53 | 
            +
            _rescan_model_configs()  # initial populate of model config registry
         | 
| 54 | 
            +
             | 
| 55 | 
            +
             | 
| 56 | 
            +
            def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
         | 
| 57 | 
            +
                checkpoint = torch.load(checkpoint_path, map_location=map_location)
         | 
| 58 | 
            +
                if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
         | 
| 59 | 
            +
                    state_dict = checkpoint["state_dict"]
         | 
| 60 | 
            +
                else:
         | 
| 61 | 
            +
                    state_dict = checkpoint
         | 
| 62 | 
            +
                if skip_params:
         | 
| 63 | 
            +
                    if next(iter(state_dict.items()))[0].startswith("module"):
         | 
| 64 | 
            +
                        state_dict = {k[7:]: v for k, v in state_dict.items()}
         | 
| 65 | 
            +
                # for k in state_dict:
         | 
| 66 | 
            +
                #     if k.startswith('transformer'):
         | 
| 67 | 
            +
                #         v = state_dict.pop(k)
         | 
| 68 | 
            +
                #         state_dict['text_branch.' + k[12:]] = v
         | 
| 69 | 
            +
                return state_dict
         | 
| 70 | 
            +
             | 
| 71 | 
            +
             | 
| 72 | 
            +
            def create_model(
         | 
| 73 | 
            +
                amodel_name: str,
         | 
| 74 | 
            +
                tmodel_name: str,
         | 
| 75 | 
            +
                pretrained: str = "",
         | 
| 76 | 
            +
                precision: str = "fp32",
         | 
| 77 | 
            +
                device: torch.device = torch.device("cpu"),
         | 
| 78 | 
            +
                jit: bool = False,
         | 
| 79 | 
            +
                force_quick_gelu: bool = False,
         | 
| 80 | 
            +
                openai_model_cache_dir: str = os.path.expanduser(f"{CACHE_DIR}/clip"),
         | 
| 81 | 
            +
                skip_params=True,
         | 
| 82 | 
            +
                pretrained_audio: str = "",
         | 
| 83 | 
            +
                pretrained_text: str = "",
         | 
| 84 | 
            +
                enable_fusion: bool = False,
         | 
| 85 | 
            +
                fusion_type: str = "None"
         | 
| 86 | 
            +
                # pretrained_image: bool = False,
         | 
| 87 | 
            +
            ):
         | 
| 88 | 
            +
                amodel_name = amodel_name.replace(
         | 
| 89 | 
            +
                    "/", "-"
         | 
| 90 | 
            +
                )  # for callers using old naming with / in ViT names
         | 
| 91 | 
            +
                pretrained_orig = pretrained
         | 
| 92 | 
            +
                pretrained = pretrained.lower()
         | 
| 93 | 
            +
                if pretrained == "openai":
         | 
| 94 | 
            +
                    if amodel_name in _MODEL_CONFIGS:
         | 
| 95 | 
            +
                        logging.info(f"Loading {amodel_name} model config.")
         | 
| 96 | 
            +
                        model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
         | 
| 97 | 
            +
                    else:
         | 
| 98 | 
            +
                        logging.error(
         | 
| 99 | 
            +
                            f"Model config for {amodel_name} not found; available models {list_models()}."
         | 
| 100 | 
            +
                        )
         | 
| 101 | 
            +
                        raise RuntimeError(f"Model config for {amodel_name} not found.")
         | 
| 102 | 
            +
             | 
| 103 | 
            +
                    logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
         | 
| 104 | 
            +
                    # Hard Code in model name
         | 
| 105 | 
            +
                    model_cfg["text_cfg"]["model_type"] = tmodel_name
         | 
| 106 | 
            +
                    model = load_openai_model(
         | 
| 107 | 
            +
                        "ViT-B-16",
         | 
| 108 | 
            +
                        model_cfg,
         | 
| 109 | 
            +
                        device=device,
         | 
| 110 | 
            +
                        jit=jit,
         | 
| 111 | 
            +
                        cache_dir=openai_model_cache_dir,
         | 
| 112 | 
            +
                        enable_fusion=enable_fusion,
         | 
| 113 | 
            +
                        fusion_type=fusion_type,
         | 
| 114 | 
            +
                    )
         | 
| 115 | 
            +
                    # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
         | 
| 116 | 
            +
                    if precision == "amp" or precision == "fp32":
         | 
| 117 | 
            +
                        model = model.float()
         | 
| 118 | 
            +
                else:
         | 
| 119 | 
            +
                    if amodel_name in _MODEL_CONFIGS:
         | 
| 120 | 
            +
                        logging.info(f"Loading {amodel_name} model config.")
         | 
| 121 | 
            +
                        model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
         | 
| 122 | 
            +
                    else:
         | 
| 123 | 
            +
                        logging.error(
         | 
| 124 | 
            +
                            f"Model config for {amodel_name} not found; available models {list_models()}."
         | 
| 125 | 
            +
                        )
         | 
| 126 | 
            +
                        raise RuntimeError(f"Model config for {amodel_name} not found.")
         | 
| 127 | 
            +
             | 
| 128 | 
            +
                    if force_quick_gelu:
         | 
| 129 | 
            +
                        # override for use of QuickGELU on non-OpenAI transformer models
         | 
| 130 | 
            +
                        model_cfg["quick_gelu"] = True
         | 
| 131 | 
            +
             | 
| 132 | 
            +
                    # if pretrained_image:
         | 
| 133 | 
            +
                    #     if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
         | 
| 134 | 
            +
                    #         # pretrained weight loading for timm models set via vision_cfg
         | 
| 135 | 
            +
                    #         model_cfg['vision_cfg']['timm_model_pretrained'] = True
         | 
| 136 | 
            +
                    #     else:
         | 
| 137 | 
            +
                    #         assert False, 'pretrained image towers currently only supported for timm models'
         | 
| 138 | 
            +
                    model_cfg["text_cfg"]["model_type"] = tmodel_name
         | 
| 139 | 
            +
                    model_cfg["enable_fusion"] = enable_fusion
         | 
| 140 | 
            +
                    model_cfg["fusion_type"] = fusion_type
         | 
| 141 | 
            +
                    model = CLAP(**model_cfg)
         | 
| 142 | 
            +
             | 
| 143 | 
            +
                    if pretrained:
         | 
| 144 | 
            +
                        checkpoint_path = ""
         | 
| 145 | 
            +
                        url = get_pretrained_url(amodel_name, pretrained)
         | 
| 146 | 
            +
                        if url:
         | 
| 147 | 
            +
                            checkpoint_path = download_pretrained(url, root=openai_model_cache_dir)
         | 
| 148 | 
            +
                        elif os.path.exists(pretrained_orig):
         | 
| 149 | 
            +
                            checkpoint_path = pretrained_orig
         | 
| 150 | 
            +
                        if checkpoint_path:
         | 
| 151 | 
            +
                            logging.info(
         | 
| 152 | 
            +
                                f"Loading pretrained {amodel_name}-{tmodel_name} weights ({pretrained})."
         | 
| 153 | 
            +
                            )
         | 
| 154 | 
            +
                            ckpt = load_state_dict(checkpoint_path, skip_params=True)
         | 
| 155 | 
            +
                            model.load_state_dict(ckpt)
         | 
| 156 | 
            +
                            param_names = [n for n, p in model.named_parameters()]
         | 
| 157 | 
            +
                            # for n in param_names:
         | 
| 158 | 
            +
                            #     print(n, "\t", "Loaded" if n in ckpt else "Unloaded")
         | 
| 159 | 
            +
                        else:
         | 
| 160 | 
            +
                            logging.warning(
         | 
| 161 | 
            +
                                f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
         | 
| 162 | 
            +
                            )
         | 
| 163 | 
            +
                            raise RuntimeError(
         | 
| 164 | 
            +
                                f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
         | 
| 165 | 
            +
                            )
         | 
| 166 | 
            +
             | 
| 167 | 
            +
                    if pretrained_audio:
         | 
| 168 | 
            +
                        if amodel_name.startswith("PANN"):
         | 
| 169 | 
            +
                            if "Cnn14_mAP" in pretrained_audio:  # official checkpoint
         | 
| 170 | 
            +
                                audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
         | 
| 171 | 
            +
                                audio_ckpt = audio_ckpt["model"]
         | 
| 172 | 
            +
                                keys = list(audio_ckpt.keys())
         | 
| 173 | 
            +
                                for key in keys:
         | 
| 174 | 
            +
                                    if (
         | 
| 175 | 
            +
                                        "spectrogram_extractor" not in key
         | 
| 176 | 
            +
                                        and "logmel_extractor" not in key
         | 
| 177 | 
            +
                                    ):
         | 
| 178 | 
            +
                                        v = audio_ckpt.pop(key)
         | 
| 179 | 
            +
                                        audio_ckpt["audio_branch." + key] = v
         | 
| 180 | 
            +
                            elif os.path.basename(pretrained_audio).startswith(
         | 
| 181 | 
            +
                                "PANN"
         | 
| 182 | 
            +
                            ):  # checkpoint trained via HTSAT codebase
         | 
| 183 | 
            +
                                audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
         | 
| 184 | 
            +
                                audio_ckpt = audio_ckpt["state_dict"]
         | 
| 185 | 
            +
                                keys = list(audio_ckpt.keys())
         | 
| 186 | 
            +
                                for key in keys:
         | 
| 187 | 
            +
                                    if key.startswith("sed_model"):
         | 
| 188 | 
            +
                                        v = audio_ckpt.pop(key)
         | 
| 189 | 
            +
                                        audio_ckpt["audio_branch." + key[10:]] = v
         | 
| 190 | 
            +
                            elif os.path.basename(pretrained_audio).startswith(
         | 
| 191 | 
            +
                                "finetuned"
         | 
| 192 | 
            +
                            ):  # checkpoint trained via linear probe codebase
         | 
| 193 | 
            +
                                audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
         | 
| 194 | 
            +
                            else:
         | 
| 195 | 
            +
                                raise ValueError("Unknown audio checkpoint")
         | 
| 196 | 
            +
                        elif amodel_name.startswith("HTSAT"):
         | 
| 197 | 
            +
                            if "HTSAT_AudioSet_Saved" in pretrained_audio:  # official checkpoint
         | 
| 198 | 
            +
                                audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
         | 
| 199 | 
            +
                                audio_ckpt = audio_ckpt["state_dict"]
         | 
| 200 | 
            +
                                keys = list(audio_ckpt.keys())
         | 
| 201 | 
            +
                                for key in keys:
         | 
| 202 | 
            +
                                    if key.startswith("sed_model") and (
         | 
| 203 | 
            +
                                        "spectrogram_extractor" not in key
         | 
| 204 | 
            +
                                        and "logmel_extractor" not in key
         | 
| 205 | 
            +
                                    ):
         | 
| 206 | 
            +
                                        v = audio_ckpt.pop(key)
         | 
| 207 | 
            +
                                        audio_ckpt["audio_branch." + key[10:]] = v
         | 
| 208 | 
            +
                            elif os.path.basename(pretrained_audio).startswith(
         | 
| 209 | 
            +
                                "HTSAT"
         | 
| 210 | 
            +
                            ):  # checkpoint trained via HTSAT codebase
         | 
| 211 | 
            +
                                audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
         | 
| 212 | 
            +
                                audio_ckpt = audio_ckpt["state_dict"]
         | 
| 213 | 
            +
                                keys = list(audio_ckpt.keys())
         | 
| 214 | 
            +
                                for key in keys:
         | 
| 215 | 
            +
                                    if key.startswith("sed_model"):
         | 
| 216 | 
            +
                                        v = audio_ckpt.pop(key)
         | 
| 217 | 
            +
                                        audio_ckpt["audio_branch." + key[10:]] = v
         | 
| 218 | 
            +
                            elif os.path.basename(pretrained_audio).startswith(
         | 
| 219 | 
            +
                                "finetuned"
         | 
| 220 | 
            +
                            ):  # checkpoint trained via linear probe codebase
         | 
| 221 | 
            +
                                audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
         | 
| 222 | 
            +
                            else:
         | 
| 223 | 
            +
                                raise ValueError("Unknown audio checkpoint")
         | 
| 224 | 
            +
                        else:
         | 
| 225 | 
            +
                            raise f"this audio encoder pretrained checkpoint is not support"
         | 
| 226 | 
            +
             | 
| 227 | 
            +
                        model.load_state_dict(audio_ckpt, strict=False)
         | 
| 228 | 
            +
                        logging.info(
         | 
| 229 | 
            +
                            f"Loading pretrained {amodel_name} weights ({pretrained_audio})."
         | 
| 230 | 
            +
                        )
         | 
| 231 | 
            +
                        param_names = [n for n, p in model.named_parameters()]
         | 
| 232 | 
            +
                        for n in param_names:
         | 
| 233 | 
            +
                            print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
         | 
| 234 | 
            +
             | 
| 235 | 
            +
                    model.to(device=device)
         | 
| 236 | 
            +
                    if precision == "fp16":
         | 
| 237 | 
            +
                        assert device.type != "cpu"
         | 
| 238 | 
            +
                        convert_weights_to_fp16(model)
         | 
| 239 | 
            +
             | 
| 240 | 
            +
                    if jit:
         | 
| 241 | 
            +
                        model = torch.jit.script(model)
         | 
| 242 | 
            +
             | 
| 243 | 
            +
                return model, model_cfg
         | 
| 244 | 
            +
             | 
| 245 | 
            +
             | 
| 246 | 
            +
            def create_model_and_transforms(
         | 
| 247 | 
            +
                model_name: str,
         | 
| 248 | 
            +
                pretrained: str = "",
         | 
| 249 | 
            +
                precision: str = "fp32",
         | 
| 250 | 
            +
                device: torch.device = torch.device("cpu"),
         | 
| 251 | 
            +
                jit: bool = False,
         | 
| 252 | 
            +
                force_quick_gelu: bool = False,
         | 
| 253 | 
            +
                # pretrained_image: bool = False,
         | 
| 254 | 
            +
            ):
         | 
| 255 | 
            +
                model = create_model(
         | 
| 256 | 
            +
                    model_name,
         | 
| 257 | 
            +
                    pretrained,
         | 
| 258 | 
            +
                    precision,
         | 
| 259 | 
            +
                    device,
         | 
| 260 | 
            +
                    jit,
         | 
| 261 | 
            +
                    force_quick_gelu=force_quick_gelu,
         | 
| 262 | 
            +
                    # pretrained_image=pretrained_image
         | 
| 263 | 
            +
                )
         | 
| 264 | 
            +
                preprocess_train = image_transform(model.visual.image_size, is_train=True)
         | 
| 265 | 
            +
                preprocess_val = image_transform(model.visual.image_size, is_train=False)
         | 
| 266 | 
            +
                return model, preprocess_train, preprocess_val
         | 
| 267 | 
            +
             | 
| 268 | 
            +
             | 
| 269 | 
            +
            def list_models():
         | 
| 270 | 
            +
                """enumerate available model architectures based on config files"""
         | 
| 271 | 
            +
                return list(_MODEL_CONFIGS.keys())
         | 
| 272 | 
            +
             | 
| 273 | 
            +
             | 
| 274 | 
            +
            def add_model_config(path):
         | 
| 275 | 
            +
                """add model config path or file and update registry"""
         | 
| 276 | 
            +
                if not isinstance(path, Path):
         | 
| 277 | 
            +
                    path = Path(path)
         | 
| 278 | 
            +
                _MODEL_CONFIG_PATHS.append(path)
         | 
| 279 | 
            +
                _rescan_model_configs()
         | 
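The branch above adapts third-party encoder checkpoints to the key namespace of the CLAP model: official PANN weights are prefixed with "audio_branch.", and checkpoints trained in the HTSAT codebase additionally have their "sed_model." prefix stripped before the non-strict load_state_dict call. Below is a minimal sketch of that remapping on a toy state dict; the key names and tensor shapes are invented for illustration and are not taken from any real checkpoint.

# Illustrative sketch only: mirrors the "sed_model.*" -> "audio_branch.*" remapping
# performed in create_model above. Key names and shapes below are made up.
import torch

audio_ckpt = {
    "sed_model.patch_embed.proj.weight": torch.zeros(96, 1, 4, 4),  # hypothetical key
    "sed_model.head.weight": torch.zeros(527, 768),                 # hypothetical key
}
for key in list(audio_ckpt.keys()):
    if key.startswith("sed_model"):
        v = audio_ckpt.pop(key)
        audio_ckpt["audio_branch." + key[10:]] = v  # len("sed_model.") == 10

print(sorted(audio_ckpt))
# ['audio_branch.head.weight', 'audio_branch.patch_embed.proj.weight']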
    	
        audioldm/clap/open_clip/feature_fusion.py
    ADDED
    
@@ -0,0 +1,192 @@
"""
Feature Fusion for Variable-Length Data Processing
AFF/iAFF are adapted from https://github.com/YimianDai/open-aff/blob/master/aff_pytorch/aff_net/fusion.py
According to the paper: Yimian Dai et al., Attentional Feature Fusion, IEEE Winter Conference on Applications of Computer Vision, WACV 2021
"""

import torch
import torch.nn as nn


class DAF(nn.Module):
    """
    Direct addition fusion (DirectAddFuse)
    """

    def __init__(self):
        super(DAF, self).__init__()

    def forward(self, x, residual):
        return x + residual


class iAFF(nn.Module):
    """
    Multi-feature fusion: iAFF
    """

    def __init__(self, channels=64, r=4, type="2D"):
        super(iAFF, self).__init__()
        inter_channels = int(channels // r)

        if type == "1D":
            # local attention
            self.local_att = nn.Sequential(
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )

            # global attention
            self.global_att = nn.Sequential(
                nn.AdaptiveAvgPool1d(1),
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )

            # second local attention
            self.local_att2 = nn.Sequential(
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )
            # second global attention
            self.global_att2 = nn.Sequential(
                nn.AdaptiveAvgPool1d(1),
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )
        elif type == "2D":
            # local attention
            self.local_att = nn.Sequential(
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )

            # global attention
            self.global_att = nn.Sequential(
                nn.AdaptiveAvgPool2d(1),
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )

            # second local attention
            self.local_att2 = nn.Sequential(
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )
            # second global attention
            self.global_att2 = nn.Sequential(
                nn.AdaptiveAvgPool2d(1),
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )
        else:
            raise ValueError(f"Unsupported fusion type: {type}")

        self.sigmoid = nn.Sigmoid()

    def forward(self, x, residual):
        flag = False
        xa = x + residual
        if xa.size(0) == 1:
            # BatchNorm after global pooling needs more than one value per channel
            # in training mode, so temporarily duplicate a batch of size 1.
            xa = torch.cat([xa, xa], dim=0)
            flag = True
        xl = self.local_att(xa)
        xg = self.global_att(xa)
        xlg = xl + xg
        wei = self.sigmoid(xlg)
        xi = x * wei + residual * (1 - wei)

        xl2 = self.local_att2(xi)
        xg2 = self.global_att(xi)  # NOTE: reuses global_att (not global_att2), kept as in the original code
        xlg2 = xl2 + xg2
        wei2 = self.sigmoid(xlg2)
        xo = x * wei2 + residual * (1 - wei2)
        if flag:
            xo = xo[0].unsqueeze(0)
        return xo


class AFF(nn.Module):
    """
    Multi-feature fusion: AFF
    """

    def __init__(self, channels=64, r=4, type="2D"):
        super(AFF, self).__init__()
        inter_channels = int(channels // r)

        if type == "1D":
            self.local_att = nn.Sequential(
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )
            self.global_att = nn.Sequential(
                nn.AdaptiveAvgPool1d(1),
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )
        elif type == "2D":
            self.local_att = nn.Sequential(
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )
            self.global_att = nn.Sequential(
                nn.AdaptiveAvgPool2d(1),
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )
        else:
            raise ValueError(f"Unsupported fusion type: {type}")

        self.sigmoid = nn.Sigmoid()

    def forward(self, x, residual):
        flag = False
        xa = x + residual
        if xa.size(0) == 1:
            # Same batch-of-one workaround as in iAFF above.
            xa = torch.cat([xa, xa], dim=0)
            flag = True
        xl = self.local_att(xa)
        xg = self.global_att(xa)
        xlg = xl + xg
        wei = self.sigmoid(xlg)
        xo = 2 * x * wei + 2 * residual * (1 - wei)
        if flag:
            xo = xo[0].unsqueeze(0)
        return xo
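The fusion modules above all expose the same two-input interface: given a pair of equally shaped feature maps, they return a fused map of the same shape (DAF by plain addition, AFF and iAFF via learned local/global attention weights). A minimal usage sketch, assuming the repository root is on the Python path so the file is importable as audioldm.clap.open_clip.feature_fusion:

# Illustrative sketch only: fuse two random 2D feature maps with AFF.
import torch
from audioldm.clap.open_clip.feature_fusion import AFF, DAF, iAFF

x = torch.randn(2, 64, 8, 8)         # "global" features, shape (batch, channels, H, W)
residual = torch.randn(2, 64, 8, 8)  # "local" features with the same shape

fused = AFF(channels=64, type="2D")(x, residual)
print(fused.shape)  # torch.Size([2, 64, 8, 8]); DAF and iAFF preserve the shape as well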
    	
        audioldm/clap/open_clip/htsat.py
    ADDED
    
@@ -0,0 +1,1308 @@
| 1 | 
            +
            # Ke Chen
         | 
| 2 | 
            +
            # knutchen@ucsd.edu
         | 
| 3 | 
            +
            # HTS-AT: A HIERARCHICAL TOKEN-SEMANTIC AUDIO TRANSFORMER FOR SOUND CLASSIFICATION AND DETECTION
         | 
| 4 | 
            +
            # Some layers designed on the model
         | 
| 5 | 
            +
            # below codes are based and referred from https://github.com/microsoft/Swin-Transformer
         | 
| 6 | 
            +
            # Swin Transformer for Computer Vision: https://arxiv.org/pdf/2103.14030.pdf
         | 
| 7 | 
            +
             | 
| 8 | 
            +
            import torch
         | 
| 9 | 
            +
            import torch.nn as nn
         | 
| 10 | 
            +
            import torch.nn.functional as F
         | 
| 11 | 
            +
            from itertools import repeat
         | 
| 12 | 
            +
            import collections.abc
         | 
| 13 | 
            +
            import math
         | 
| 14 | 
            +
            import warnings
         | 
| 15 | 
            +
             | 
| 16 | 
            +
            from torch.nn.init import _calculate_fan_in_and_fan_out
         | 
| 17 | 
            +
            import torch.utils.checkpoint as checkpoint
         | 
| 18 | 
            +
             | 
| 19 | 
            +
            import random
         | 
| 20 | 
            +
             | 
| 21 | 
            +
            from torchlibrosa.stft import Spectrogram, LogmelFilterBank
         | 
| 22 | 
            +
            from torchlibrosa.augmentation import SpecAugmentation
         | 
| 23 | 
            +
             | 
| 24 | 
            +
            from itertools import repeat
         | 
| 25 | 
            +
            from .utils import do_mixup, interpolate
         | 
| 26 | 
            +
             | 
| 27 | 
            +
            from .feature_fusion import iAFF, AFF, DAF
         | 
| 28 | 
            +
             | 
| 29 | 
            +
            # from PyTorch internals
         | 
| 30 | 
            +
            def _ntuple(n):
         | 
| 31 | 
            +
                def parse(x):
         | 
| 32 | 
            +
                    if isinstance(x, collections.abc.Iterable):
         | 
| 33 | 
            +
                        return x
         | 
| 34 | 
            +
                    return tuple(repeat(x, n))
         | 
| 35 | 
            +
             | 
| 36 | 
            +
                return parse
         | 
| 37 | 
            +
             | 
| 38 | 
            +
             | 
| 39 | 
            +
            to_1tuple = _ntuple(1)
         | 
| 40 | 
            +
            to_2tuple = _ntuple(2)
         | 
| 41 | 
            +
            to_3tuple = _ntuple(3)
         | 
| 42 | 
            +
            to_4tuple = _ntuple(4)
         | 
| 43 | 
            +
            to_ntuple = _ntuple
         | 
| 44 | 
            +
             | 
| 45 | 
            +
             | 
| 46 | 
            +
            def drop_path(x, drop_prob: float = 0.0, training: bool = False):
         | 
| 47 | 
            +
                """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
         | 
| 48 | 
            +
                This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
         | 
| 49 | 
            +
                the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
         | 
| 50 | 
            +
                See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
         | 
| 51 | 
            +
                changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
         | 
| 52 | 
            +
                'survival rate' as the argument.
         | 
| 53 | 
            +
                """
         | 
| 54 | 
            +
                if drop_prob == 0.0 or not training:
         | 
| 55 | 
            +
                    return x
         | 
| 56 | 
            +
                keep_prob = 1 - drop_prob
         | 
| 57 | 
            +
                shape = (x.shape[0],) + (1,) * (
         | 
| 58 | 
            +
                    x.ndim - 1
         | 
| 59 | 
            +
                )  # work with diff dim tensors, not just 2D ConvNets
         | 
| 60 | 
            +
                random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
         | 
| 61 | 
            +
                random_tensor.floor_()  # binarize
         | 
| 62 | 
            +
                output = x.div(keep_prob) * random_tensor
         | 
| 63 | 
            +
                return output
         | 
| 64 | 
            +
             | 
| 65 | 
            +
             | 
| 66 | 
            +
            class DropPath(nn.Module):
         | 
| 67 | 
            +
                """Drop paths (Stochastic Depth) per sample  (when applied in main path of residual blocks)."""
         | 
| 68 | 
            +
             | 
| 69 | 
            +
                def __init__(self, drop_prob=None):
         | 
| 70 | 
            +
                    super(DropPath, self).__init__()
         | 
| 71 | 
            +
                    self.drop_prob = drop_prob
         | 
| 72 | 
            +
             | 
| 73 | 
            +
                def forward(self, x):
         | 
| 74 | 
            +
                    return drop_path(x, self.drop_prob, self.training)
         | 
| 75 | 
            +
             | 
| 76 | 
            +
             | 
| 77 | 
            +
            class PatchEmbed(nn.Module):
         | 
| 78 | 
            +
                """2D Image to Patch Embedding"""
         | 
| 79 | 
            +
             | 
| 80 | 
            +
                def __init__(
         | 
| 81 | 
            +
                    self,
         | 
| 82 | 
            +
                    img_size=224,
         | 
| 83 | 
            +
                    patch_size=16,
         | 
| 84 | 
            +
                    in_chans=3,
         | 
| 85 | 
            +
                    embed_dim=768,
         | 
| 86 | 
            +
                    norm_layer=None,
         | 
| 87 | 
            +
                    flatten=True,
         | 
| 88 | 
            +
                    patch_stride=16,
         | 
| 89 | 
            +
                    enable_fusion=False,
         | 
| 90 | 
            +
                    fusion_type="None",
         | 
| 91 | 
            +
                ):
         | 
| 92 | 
            +
                    super().__init__()
         | 
| 93 | 
            +
                    img_size = to_2tuple(img_size)
         | 
| 94 | 
            +
                    patch_size = to_2tuple(patch_size)
         | 
| 95 | 
            +
                    patch_stride = to_2tuple(patch_stride)
         | 
| 96 | 
            +
                    self.img_size = img_size
         | 
| 97 | 
            +
                    self.patch_size = patch_size
         | 
| 98 | 
            +
                    self.patch_stride = patch_stride
         | 
| 99 | 
            +
                    self.grid_size = (
         | 
| 100 | 
            +
                        img_size[0] // patch_stride[0],
         | 
| 101 | 
            +
                        img_size[1] // patch_stride[1],
         | 
| 102 | 
            +
                    )
         | 
| 103 | 
            +
                    self.num_patches = self.grid_size[0] * self.grid_size[1]
         | 
| 104 | 
            +
                    self.flatten = flatten
         | 
| 105 | 
            +
                    self.in_chans = in_chans
         | 
| 106 | 
            +
                    self.embed_dim = embed_dim
         | 
| 107 | 
            +
             | 
| 108 | 
            +
                    self.enable_fusion = enable_fusion
         | 
| 109 | 
            +
                    self.fusion_type = fusion_type
         | 
| 110 | 
            +
             | 
| 111 | 
            +
                    padding = (
         | 
| 112 | 
            +
                        (patch_size[0] - patch_stride[0]) // 2,
         | 
| 113 | 
            +
                        (patch_size[1] - patch_stride[1]) // 2,
         | 
| 114 | 
            +
                    )
         | 
| 115 | 
            +
             | 
| 116 | 
            +
                    if (self.enable_fusion) and (self.fusion_type == "channel_map"):
         | 
| 117 | 
            +
                        self.proj = nn.Conv2d(
         | 
| 118 | 
            +
                            in_chans * 4,
         | 
| 119 | 
            +
                            embed_dim,
         | 
| 120 | 
            +
                            kernel_size=patch_size,
         | 
| 121 | 
            +
                            stride=patch_stride,
         | 
| 122 | 
            +
                            padding=padding,
         | 
| 123 | 
            +
                        )
         | 
| 124 | 
            +
                    else:
         | 
| 125 | 
            +
                        self.proj = nn.Conv2d(
         | 
| 126 | 
            +
                            in_chans,
         | 
| 127 | 
            +
                            embed_dim,
         | 
| 128 | 
            +
                            kernel_size=patch_size,
         | 
| 129 | 
            +
                            stride=patch_stride,
         | 
| 130 | 
            +
                            padding=padding,
         | 
| 131 | 
            +
                        )
         | 
| 132 | 
            +
                    self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
         | 
| 133 | 
            +
             | 
| 134 | 
            +
                    if (self.enable_fusion) and (
         | 
| 135 | 
            +
                        self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
         | 
| 136 | 
            +
                    ):
         | 
| 137 | 
            +
                        self.mel_conv2d = nn.Conv2d(
         | 
| 138 | 
            +
                            in_chans,
         | 
| 139 | 
            +
                            embed_dim,
         | 
| 140 | 
            +
                            kernel_size=(patch_size[0], patch_size[1] * 3),
         | 
| 141 | 
            +
                            stride=(patch_stride[0], patch_stride[1] * 3),
         | 
| 142 | 
            +
                            padding=padding,
         | 
| 143 | 
            +
                        )
         | 
| 144 | 
            +
                        if self.fusion_type == "daf_2d":
         | 
| 145 | 
            +
                            self.fusion_model = DAF()
         | 
| 146 | 
            +
                        elif self.fusion_type == "aff_2d":
         | 
| 147 | 
            +
                            self.fusion_model = AFF(channels=embed_dim, type="2D")
         | 
| 148 | 
            +
                        elif self.fusion_type == "iaff_2d":
         | 
| 149 | 
            +
                            self.fusion_model = iAFF(channels=embed_dim, type="2D")
         | 
| 150 | 
            +
             | 
| 151 | 
            +
                def forward(self, x, longer_idx=None):
         | 
| 152 | 
            +
                    if (self.enable_fusion) and (
         | 
| 153 | 
            +
                        self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
         | 
| 154 | 
            +
                    ):
         | 
| 155 | 
            +
                        global_x = x[:, 0:1, :, :]
         | 
| 156 | 
            +
             | 
| 157 | 
            +
                        # global processing
         | 
| 158 | 
            +
                        B, C, H, W = global_x.shape
         | 
| 159 | 
            +
                        assert (
         | 
| 160 | 
            +
                            H == self.img_size[0] and W == self.img_size[1]
         | 
| 161 | 
            +
                        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
         | 
| 162 | 
            +
                        global_x = self.proj(global_x)
         | 
| 163 | 
            +
                        TW = global_x.size(-1)
         | 
| 164 | 
            +
                        if len(longer_idx) > 0:
         | 
| 165 | 
            +
                            # local processing
         | 
| 166 | 
            +
                            local_x = x[longer_idx, 1:, :, :].contiguous()
         | 
| 167 | 
            +
                            B, C, H, W = local_x.shape
         | 
| 168 | 
            +
                            local_x = local_x.view(B * C, 1, H, W)
         | 
| 169 | 
            +
                            local_x = self.mel_conv2d(local_x)
         | 
| 170 | 
            +
                            local_x = local_x.view(
         | 
| 171 | 
            +
                                B, C, local_x.size(1), local_x.size(2), local_x.size(3)
         | 
| 172 | 
            +
                            )
         | 
| 173 | 
            +
                            local_x = local_x.permute((0, 2, 3, 1, 4)).contiguous().flatten(3)
         | 
| 174 | 
            +
                            TB, TC, TH, _ = local_x.size()
         | 
| 175 | 
            +
                            if local_x.size(-1) < TW:
         | 
| 176 | 
            +
                                local_x = torch.cat(
         | 
| 177 | 
            +
                                    [
         | 
| 178 | 
            +
                                        local_x,
         | 
| 179 | 
            +
                                        torch.zeros(
         | 
| 180 | 
            +
                                            (TB, TC, TH, TW - local_x.size(-1)),
         | 
| 181 | 
            +
                                            device=global_x.device,
         | 
| 182 | 
            +
                                        ),
         | 
| 183 | 
            +
                                    ],
         | 
| 184 | 
            +
                                    dim=-1,
         | 
| 185 | 
            +
                                )
         | 
| 186 | 
            +
                            else:
         | 
| 187 | 
            +
                                local_x = local_x[:, :, :, :TW]
         | 
| 188 | 
            +
             | 
| 189 | 
            +
                            global_x[longer_idx] = self.fusion_model(global_x[longer_idx], local_x)
         | 
| 190 | 
            +
                        x = global_x
         | 
| 191 | 
            +
                    else:
         | 
| 192 | 
            +
                        B, C, H, W = x.shape
         | 
| 193 | 
            +
                        assert (
         | 
| 194 | 
            +
                            H == self.img_size[0] and W == self.img_size[1]
         | 
| 195 | 
            +
                        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
         | 
| 196 | 
            +
                        x = self.proj(x)
         | 
| 197 | 
            +
             | 
| 198 | 
            +
                    if self.flatten:
         | 
| 199 | 
            +
                        x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
         | 
| 200 | 
            +
                    x = self.norm(x)
         | 
| 201 | 
            +
                    return x
         | 
| 202 | 
            +
             | 
| 203 | 
            +
             | 
| 204 | 
            +
            class Mlp(nn.Module):
         | 
| 205 | 
            +
                """MLP as used in Vision Transformer, MLP-Mixer and related networks"""
         | 
| 206 | 
            +
             | 
| 207 | 
            +
                def __init__(
         | 
| 208 | 
            +
                    self,
         | 
| 209 | 
            +
                    in_features,
         | 
| 210 | 
            +
                    hidden_features=None,
         | 
| 211 | 
            +
                    out_features=None,
         | 
| 212 | 
            +
                    act_layer=nn.GELU,
         | 
| 213 | 
            +
                    drop=0.0,
         | 
| 214 | 
            +
                ):
         | 
| 215 | 
            +
                    super().__init__()
         | 
| 216 | 
            +
                    out_features = out_features or in_features
         | 
| 217 | 
            +
                    hidden_features = hidden_features or in_features
         | 
| 218 | 
            +
                    self.fc1 = nn.Linear(in_features, hidden_features)
         | 
| 219 | 
            +
                    self.act = act_layer()
         | 
| 220 | 
            +
                    self.fc2 = nn.Linear(hidden_features, out_features)
         | 
| 221 | 
            +
                    self.drop = nn.Dropout(drop)
         | 
| 222 | 
            +
             | 
| 223 | 
            +
                def forward(self, x):
         | 
| 224 | 
            +
                    x = self.fc1(x)
         | 
| 225 | 
            +
                    x = self.act(x)
         | 
| 226 | 
            +
                    x = self.drop(x)
         | 
| 227 | 
            +
                    x = self.fc2(x)
         | 
| 228 | 
            +
                    x = self.drop(x)
         | 
| 229 | 
            +
                    return x
         | 
| 230 | 
            +
             | 
| 231 | 
            +
             | 
| 232 | 
            +
            def _no_grad_trunc_normal_(tensor, mean, std, a, b):
         | 
| 233 | 
            +
                # Cut & paste from PyTorch official master until it's in a few official releases - RW
         | 
| 234 | 
            +
                # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
         | 
| 235 | 
            +
                def norm_cdf(x):
         | 
| 236 | 
            +
                    # Computes standard normal cumulative distribution function
         | 
| 237 | 
            +
                    return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
         | 
| 238 | 
            +
             | 
| 239 | 
            +
                if (mean < a - 2 * std) or (mean > b + 2 * std):
         | 
| 240 | 
            +
                    warnings.warn(
         | 
| 241 | 
            +
                        "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
         | 
| 242 | 
            +
                        "The distribution of values may be incorrect.",
         | 
| 243 | 
            +
                        stacklevel=2,
         | 
| 244 | 
            +
                    )
         | 
| 245 | 
            +
             | 
| 246 | 
            +
                with torch.no_grad():
         | 
| 247 | 
            +
                    # Values are generated by using a truncated uniform distribution and
         | 
| 248 | 
            +
                    # then using the inverse CDF for the normal distribution.
         | 
| 249 | 
            +
                    # Get upper and lower cdf values
         | 
| 250 | 
            +
                    l = norm_cdf((a - mean) / std)
         | 
| 251 | 
            +
                    u = norm_cdf((b - mean) / std)
         | 
| 252 | 
            +
             | 
| 253 | 
            +
                    # Uniformly fill tensor with values from [l, u], then translate to
         | 
| 254 | 
            +
                    # [2l-1, 2u-1].
         | 
| 255 | 
            +
                    tensor.uniform_(2 * l - 1, 2 * u - 1)
         | 
| 256 | 
            +
             | 
| 257 | 
            +
                    # Use inverse cdf transform for normal distribution to get truncated
         | 
| 258 | 
            +
                    # standard normal
         | 
| 259 | 
            +
                    tensor.erfinv_()
         | 
| 260 | 
            +
             | 
| 261 | 
            +
                    # Transform to proper mean, std
         | 
| 262 | 
            +
                    tensor.mul_(std * math.sqrt(2.0))
         | 
| 263 | 
            +
                    tensor.add_(mean)
         | 
| 264 | 
            +
             | 
| 265 | 
            +
                    # Clamp to ensure it's in the proper range
         | 
| 266 | 
            +
                    tensor.clamp_(min=a, max=b)
         | 
| 267 | 
            +
                    return tensor
         | 
| 268 | 
            +
             | 
| 269 | 
            +
             | 
| 270 | 
            +
            def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
         | 
| 271 | 
            +
                # type: (Tensor, float, float, float, float) -> Tensor
         | 
| 272 | 
            +
                r"""Fills the input Tensor with values drawn from a truncated
         | 
| 273 | 
            +
                normal distribution. The values are effectively drawn from the
         | 
| 274 | 
            +
                normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
         | 
| 275 | 
            +
                with values outside :math:`[a, b]` redrawn until they are within
         | 
| 276 | 
            +
                the bounds. The method used for generating the random values works
         | 
| 277 | 
            +
                best when :math:`a \leq \text{mean} \leq b`.
         | 
| 278 | 
            +
                Args:
         | 
| 279 | 
            +
                    tensor: an n-dimensional `torch.Tensor`
         | 
| 280 | 
            +
                    mean: the mean of the normal distribution
         | 
| 281 | 
            +
                    std: the standard deviation of the normal distribution
         | 
| 282 | 
            +
                    a: the minimum cutoff value
         | 
| 283 | 
            +
                    b: the maximum cutoff value
         | 
| 284 | 
            +
                Examples:
         | 
| 285 | 
            +
                    >>> w = torch.empty(3, 5)
         | 
| 286 | 
            +
                    >>> nn.init.trunc_normal_(w)
         | 
| 287 | 
            +
                """
         | 
| 288 | 
            +
                return _no_grad_trunc_normal_(tensor, mean, std, a, b)
         | 
| 289 | 
            +
             | 
| 290 | 
            +
             | 
def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == "fan_in":
        denom = fan_in
    elif mode == "fan_out":
        denom = fan_out
    elif mode == "fan_avg":
        denom = (fan_in + fan_out) / 2

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")


def lecun_normal_(tensor):
    variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")

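# Illustrative sketch (not part of the original upload): for a hypothetical
# weight of shape (64, 128), fan_in is 128, so lecun_normal_ targets variance
# scale / fan_in = 1 / 128 and draws from a truncated normal with
# std = sqrt(1 / 128) / 0.8796... ~= 0.1005:
#
#   >>> w = torch.empty(64, 128)
#   >>> lecun_normal_(w)
#   >>> w.std().item()              # close to sqrt(1/128)/0.8796 ~= 0.10
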
def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size
    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = (
        x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    )
    return windows


def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(
        B, H // window_size, W // window_size, window_size, window_size, -1
    )
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x

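# Illustrative sketch (not part of the original upload): window_partition and
# window_reverse are inverses of each other, e.g. with an 8x8 feature map and
# 4x4 windows:
#
#   >>> x = torch.randn(2, 8, 8, 96)                 # (B, H, W, C)
#   >>> windows = window_partition(x, 4)             # (2 * 4, 4, 4, 96)
#   >>> x_back = window_reverse(windows, 4, 8, 8)    # (2, 8, 8, 96)
#   >>> torch.equal(x, x_back)
#   True
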
class WindowAttention(nn.Module):
    r"""Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(
        self,
        dim,
        window_size,
        num_heads,
        qkv_bias=True,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):

        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim**-0.5

        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
        )  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = (
            coords_flatten[:, :, None] - coords_flatten[:, None, :]
        )  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(
            1, 2, 0
        ).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=0.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        qkv = (
            self.qkv(x)
            .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = (
            qkv[0],
            qkv[1],
            qkv[2],
        )  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = q @ k.transpose(-2, -1)

        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)
        ].view(
            self.window_size[0] * self.window_size[1],
            self.window_size[0] * self.window_size[1],
            -1,
        )  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1
        ).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(
                1
            ).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn

    def extra_repr(self):
        return f"dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}"

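# Illustrative sketch (not part of the original upload): the module attends
# within each window independently and also returns the attention map, e.g.
# for 8x8 windows over a 96-dim embedding with 4 heads:
#
#   >>> attn_mod = WindowAttention(dim=96, window_size=(8, 8), num_heads=4)
#   >>> windows = torch.randn(16, 64, 96)            # (num_windows*B, Wh*Ww, C)
#   >>> out, attn = attn_mod(windows)
#   >>> out.shape, attn.shape
#   (torch.Size([16, 64, 96]), torch.Size([16, 4, 64, 64]))
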
# The model is built from Swin Transformer blocks, so a pretrained Swin Transformer can be reused
class SwinTransformerBlock(nn.Module):
    r"""Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(
        self,
        dim,
        input_resolution,
        num_heads,
        window_size=7,
        shift_size=0,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        norm_before_mlp="ln",
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        self.norm_before_mlp = norm_before_mlp
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert (
            0 <= self.shift_size < self.window_size
        ), "shift_size must be in [0, window_size)"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim,
            window_size=to_2tuple(self.window_size),
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )

        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        if self.norm_before_mlp == "ln":
            self.norm2 = nn.LayerNorm(dim)
        elif self.norm_before_mlp == "bn":
            self.norm2 = lambda x: nn.BatchNorm1d(dim)(x.transpose(1, 2)).transpose(
                1, 2
            )
        else:
            raise NotImplementedError
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )

        if self.shift_size > 0:
            # calculate attention mask for SW-MSA
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
            h_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            w_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1

            mask_windows = window_partition(
                img_mask, self.window_size
            )  # nW, window_size, window_size, 1
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(
                attn_mask != 0, float(-100.0)
            ).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None

        self.register_buffer("attn_mask", attn_mask)

    def forward(self, x):
        # pdb.set_trace()
        H, W = self.input_resolution
        # print("H: ", H)
        # print("W: ", W)
        # pdb.set_trace()
        B, L, C = x.shape
        # assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(
                x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
            )
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(
            shifted_x, self.window_size
        )  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(
            -1, self.window_size * self.window_size, C
        )  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA
        attn_windows, attn = self.attn(
            x_windows, mask=self.attn_mask
        )  # nW*B, window_size*window_size, C

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(
                shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
            )
        else:
            x = shifted_x
        x = x.view(B, H * W, C)

        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x, attn

    def extra_repr(self):
        return (
            f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, "
            f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
        )

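# Illustrative sketch (not part of the original upload): a block keeps the token
# count and channel width unchanged; stacked blocks alternate between W-MSA
# (shift_size=0) and SW-MSA (shift_size=window_size//2):
#
#   >>> blk = SwinTransformerBlock(dim=96, input_resolution=(64, 64),
#   ...                            num_heads=4, window_size=8, shift_size=4)
#   >>> tokens = torch.randn(2, 64 * 64, 96)         # (B, H*W, C)
#   >>> out, attn = blk(tokens)
#   >>> out.shape
#   torch.Size([2, 4096, 96])
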
class PatchMerging(nn.Module):
    r"""Patch Merging Layer.
    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."

        x = x.view(B, H, W, C)

        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C

        x = self.norm(x)
        x = self.reduction(x)

        return x

    def extra_repr(self):
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

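# Illustrative sketch (not part of the original upload): merging halves each
# spatial dimension and doubles the channel width:
#
#   >>> merge = PatchMerging(input_resolution=(64, 64), dim=96)
#   >>> tokens = torch.randn(2, 64 * 64, 96)
#   >>> merge(tokens).shape                          # 4096 tokens -> 1024, 96 -> 192
#   torch.Size([2, 1024, 192])
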
class BasicLayer(nn.Module):
    """A basic Swin Transformer layer for one stage.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        num_heads,
        window_size,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
        norm_before_mlp="ln",
    ):

        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks
        self.blocks = nn.ModuleList(
            [
                SwinTransformerBlock(
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    window_size=window_size,
                    shift_size=0 if (i % 2 == 0) else window_size // 2,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=drop_path[i]
                    if isinstance(drop_path, list)
                    else drop_path,
                    norm_layer=norm_layer,
                    norm_before_mlp=norm_before_mlp,
                )
                for i in range(depth)
            ]
        )

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(
                input_resolution, dim=dim, norm_layer=norm_layer
            )
        else:
            self.downsample = None

    def forward(self, x):
        attns = []
        for blk in self.blocks:
            if self.use_checkpoint:
                # unpack both outputs so the checkpointed path matches blk's (x, attn) return value
                x, attn = checkpoint.checkpoint(blk, x)
            else:
                x, attn = blk(x)
                if not self.training:
                    attns.append(attn.unsqueeze(0))
        if self.downsample is not None:
            x = self.downsample(x)
        if not self.training:
            attn = torch.cat(attns, dim=0)
            attn = torch.mean(attn, dim=0)
        return x, attn

    def extra_repr(self):
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

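# Illustrative sketch (not part of the original upload): one stage runs `depth`
# alternating W-MSA / SW-MSA blocks and optionally downsamples at the end:
#
#   >>> stage = BasicLayer(dim=96, input_resolution=(64, 64), depth=2,
#   ...                    num_heads=4, window_size=8, downsample=PatchMerging)
#   >>> tokens = torch.randn(2, 64 * 64, 96)
#   >>> out, attn = stage(tokens)
#   >>> out.shape                                    # downsampled by PatchMerging
#   torch.Size([2, 1024, 192])
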
# The Core of HTSAT
class HTSAT_Swin_Transformer(nn.Module):
    r"""HTSAT based on the Swin Transformer
    Args:
        spec_size (int | tuple(int)): Input Spectrogram size. Default: 256
        patch_size (int | tuple(int)): Patch size. Default: 4
        patch_stride (int | tuple(int)): Patch stride for the frequency and time axes. Default: 4
        in_chans (int): Number of input image channels. Default: 1 (mono)
        num_classes (int): Number of classes for classification head. Default: 527
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each HTSAT-Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 8
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        config (module): The configuration Module from config.py
    """

    def __init__(
        self,
        spec_size=256,
        patch_size=4,
        patch_stride=(4, 4),
        in_chans=1,
        num_classes=527,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[4, 8, 16, 32],
        window_size=8,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        norm_layer=nn.LayerNorm,
        ape=False,
        patch_norm=True,
        use_checkpoint=False,
        norm_before_mlp="ln",
        config=None,
        enable_fusion=False,
        fusion_type="None",
        **kwargs,
    ):
        super(HTSAT_Swin_Transformer, self).__init__()

        self.config = config
        self.spec_size = spec_size
        self.patch_stride = patch_stride
        self.patch_size = patch_size
        self.window_size = window_size
        self.embed_dim = embed_dim
        self.depths = depths
        self.ape = ape
        self.in_chans = in_chans
        self.num_classes = num_classes
        self.num_heads = num_heads
        self.num_layers = len(self.depths)
        self.num_features = int(self.embed_dim * 2 ** (self.num_layers - 1))

        self.drop_rate = drop_rate
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate

        self.qkv_bias = qkv_bias
        self.qk_scale = None

        self.patch_norm = patch_norm
        self.norm_layer = norm_layer if self.patch_norm else None
        self.norm_before_mlp = norm_before_mlp
        self.mlp_ratio = mlp_ratio

        self.use_checkpoint = use_checkpoint

        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type

        # process mel-spectrogram; used only once
        self.freq_ratio = self.spec_size // self.config.mel_bins
        window = "hann"
        center = True
        pad_mode = "reflect"
        ref = 1.0
        amin = 1e-10
        top_db = None
        self.interpolate_ratio = 32  # Downsampled ratio
        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(
            n_fft=config.window_size,
            hop_length=config.hop_size,
            win_length=config.window_size,
            window=window,
            center=center,
            pad_mode=pad_mode,
            freeze_parameters=True,
        )
        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(
            sr=config.sample_rate,
            n_fft=config.window_size,
            n_mels=config.mel_bins,
            fmin=config.fmin,
            fmax=config.fmax,
            ref=ref,
            amin=amin,
            top_db=top_db,
            freeze_parameters=True,
        )
        # Spec augmenter
        self.spec_augmenter = SpecAugmentation(
            time_drop_width=64,
            time_stripes_num=2,
            freq_drop_width=8,
            freq_stripes_num=2,
        )  # 2 2
        self.bn0 = nn.BatchNorm2d(self.config.mel_bins)

        # split spectrogram into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=self.spec_size,
            patch_size=self.patch_size,
            in_chans=self.in_chans,
            embed_dim=self.embed_dim,
            norm_layer=self.norm_layer,
            patch_stride=patch_stride,
            enable_fusion=self.enable_fusion,
            fusion_type=self.fusion_type,
        )

        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.grid_size
        self.patches_resolution = patches_resolution

        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(
                torch.zeros(1, num_patches, self.embed_dim)
            )
            trunc_normal_(self.absolute_pos_embed, std=0.02)

        self.pos_drop = nn.Dropout(p=self.drop_rate)

        # stochastic depth
        dpr = [
            x.item() for x in torch.linspace(0, self.drop_path_rate, sum(self.depths))
        ]  # stochastic depth decay rule

        # build layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(
                dim=int(self.embed_dim * 2**i_layer),
                input_resolution=(
                    patches_resolution[0] // (2**i_layer),
                    patches_resolution[1] // (2**i_layer),
                ),
                depth=self.depths[i_layer],
                num_heads=self.num_heads[i_layer],
                window_size=self.window_size,
                mlp_ratio=self.mlp_ratio,
                qkv_bias=self.qkv_bias,
                qk_scale=self.qk_scale,
                drop=self.drop_rate,
                attn_drop=self.attn_drop_rate,
                drop_path=dpr[
                    sum(self.depths[:i_layer]) : sum(self.depths[: i_layer + 1])
                ],
                norm_layer=self.norm_layer,
                downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                use_checkpoint=use_checkpoint,
                norm_before_mlp=self.norm_before_mlp,
            )
            self.layers.append(layer)

        self.norm = self.norm_layer(self.num_features)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.maxpool = nn.AdaptiveMaxPool1d(1)

        SF = (
            self.spec_size
            // (2 ** (len(self.depths) - 1))
            // self.patch_stride[0]
            // self.freq_ratio
        )
        self.tscam_conv = nn.Conv2d(
            in_channels=self.num_features,
            out_channels=self.num_classes,
            kernel_size=(SF, 3),
            padding=(0, 1),
        )
        self.head = nn.Linear(num_classes, num_classes)

        if (self.enable_fusion) and (
            self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]
        ):
            self.mel_conv1d = nn.Sequential(
                nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
                nn.BatchNorm1d(64),
            )
            if self.fusion_type == "daf_1d":
                self.fusion_model = DAF()
            elif self.fusion_type == "aff_1d":
                self.fusion_model = AFF(channels=64, type="1D")
            elif self.fusion_type == "iaff_1d":
                self.fusion_model = iAFF(channels=64, type="1D")

        self.apply(self._init_weights)

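    # Illustrative sketch (not part of the original upload): with the defaults
    # above and, hypothetically, config.mel_bins = 64, the reshaping constants
    # work out to
    #
    #   freq_ratio = spec_size // mel_bins     = 256 // 64      = 4
    #   SF         = 256 // 2**(4 - 1) // 4 // 4                = 2
    #
    # so self.tscam_conv uses a (2, 3) kernel (frequency x time).
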
            +
                def _init_weights(self, m):
         | 
| 996 | 
            +
                    if isinstance(m, nn.Linear):
         | 
| 997 | 
            +
                        trunc_normal_(m.weight, std=0.02)
         | 
| 998 | 
            +
                        if isinstance(m, nn.Linear) and m.bias is not None:
         | 
| 999 | 
            +
                            nn.init.constant_(m.bias, 0)
         | 
| 1000 | 
            +
                    elif isinstance(m, nn.LayerNorm):
         | 
| 1001 | 
            +
                        nn.init.constant_(m.bias, 0)
         | 
| 1002 | 
            +
                        nn.init.constant_(m.weight, 1.0)
         | 
| 1003 | 
            +
             | 
| 1004 | 
            +
                @torch.jit.ignore
         | 
| 1005 | 
            +
                def no_weight_decay(self):
         | 
| 1006 | 
            +
                    return {"absolute_pos_embed"}
         | 
| 1007 | 
            +
             | 
| 1008 | 
            +
                @torch.jit.ignore
         | 
| 1009 | 
            +
                def no_weight_decay_keywords(self):
         | 
| 1010 | 
            +
                    return {"relative_position_bias_table"}
         | 
| 1011 | 
            +
             | 
| 1012 | 
            +
                def forward_features(self, x, longer_idx=None):
         | 
| 1013 | 
            +
                    # A deprecated optimization for using a hierarchical output from different blocks
         | 
| 1014 | 
            +
             | 
| 1015 | 
            +
                    frames_num = x.shape[2]
         | 
| 1016 | 
            +
                    x = self.patch_embed(x, longer_idx=longer_idx)
         | 
| 1017 | 
            +
                    if self.ape:
         | 
| 1018 | 
            +
                        x = x + self.absolute_pos_embed
         | 
| 1019 | 
            +
                    x = self.pos_drop(x)
         | 
| 1020 | 
            +
                    for i, layer in enumerate(self.layers):
         | 
| 1021 | 
            +
                        x, attn = layer(x)
         | 
| 1022 | 
            +
                    # for x
         | 
| 1023 | 
            +
                    x = self.norm(x)
         | 
| 1024 | 
            +
                    B, N, C = x.shape
         | 
| 1025 | 
            +
                    SF = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
         | 
| 1026 | 
            +
                    ST = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]
         | 
| 1027 | 
            +
                    x = x.permute(0, 2, 1).contiguous().reshape(B, C, SF, ST)
         | 
| 1028 | 
            +
                    B, C, F, T = x.shape
         | 
| 1029 | 
            +
                    # group 2D CNN
         | 
| 1030 | 
            +
                    c_freq_bin = F // self.freq_ratio
         | 
| 1031 | 
            +
                    x = x.reshape(B, C, F // c_freq_bin, c_freq_bin, T)
         | 
| 1032 | 
            +
                    x = x.permute(0, 1, 3, 2, 4).contiguous().reshape(B, C, c_freq_bin, -1)
         | 
| 1033 | 
            +
                    # get latent_output
         | 
| 1034 | 
            +
                    fine_grained_latent_output = torch.mean(x, dim=2)
         | 
| 1035 | 
            +
                    fine_grained_latent_output = interpolate(
         | 
| 1036 | 
            +
                        fine_grained_latent_output.permute(0, 2, 1).contiguous(),
         | 
| 1037 | 
            +
                        8 * self.patch_stride[1],
         | 
| 1038 | 
            +
                    )
         | 
| 1039 | 
            +
             | 
| 1040 | 
            +
                    latent_output = self.avgpool(torch.flatten(x, 2))
         | 
| 1041 | 
            +
                    latent_output = torch.flatten(latent_output, 1)
         | 
| 1042 | 
            +
             | 
| 1043 | 
            +
                    # display the attention map, if needed
         | 
| 1044 | 
            +
             | 
| 1045 | 
            +
                    x = self.tscam_conv(x)
         | 
| 1046 | 
            +
                    x = torch.flatten(x, 2)  # B, C, T
         | 
| 1047 | 
            +
             | 
| 1048 | 
            +
                    fpx = interpolate(
         | 
| 1049 | 
            +
                        torch.sigmoid(x).permute(0, 2, 1).contiguous(), 8 * self.patch_stride[1]
         | 
| 1050 | 
            +
                    )
         | 
| 1051 | 
            +
             | 
| 1052 | 
            +
                    x = self.avgpool(x)
         | 
| 1053 | 
            +
                    x = torch.flatten(x, 1)
         | 
| 1054 | 
            +
             | 
| 1055 | 
            +
                    output_dict = {
         | 
| 1056 | 
            +
                        "framewise_output": fpx,  # already sigmoided
         | 
| 1057 | 
            +
                        "clipwise_output": torch.sigmoid(x),
         | 
| 1058 | 
            +
                        "fine_grained_embedding": fine_grained_latent_output,
         | 
| 1059 | 
            +
                        "embedding": latent_output,
         | 
| 1060 | 
            +
                    }
         | 
| 1061 | 
            +
             | 
| 1062 | 
            +
                    return output_dict
         | 
| 1063 | 
            +
             | 
| 1064 | 
            +
                def crop_wav(self, x, crop_size, spe_pos=None):
         | 
| 1065 | 
            +
                    time_steps = x.shape[2]
         | 
| 1066 | 
            +
                    tx = torch.zeros(x.shape[0], x.shape[1], crop_size, x.shape[3]).to(x.device)
         | 
| 1067 | 
            +
                    for i in range(len(x)):
         | 
| 1068 | 
            +
                        if spe_pos is None:
         | 
| 1069 | 
            +
                            crop_pos = random.randint(0, time_steps - crop_size - 1)
         | 
| 1070 | 
            +
                        else:
         | 
| 1071 | 
            +
                            crop_pos = spe_pos
         | 
| 1072 | 
            +
                        tx[i][0] = x[i, 0, crop_pos : crop_pos + crop_size, :]
         | 
| 1073 | 
            +
                    return tx
         | 
| 1074 | 
            +
             | 
| 1075 | 
            +
                # Reshape the wavform to a img size, if you want to use the pretrained swin transformer model
         | 
| 1076 | 
            +
                def reshape_wav2img(self, x):
         | 
| 1077 | 
            +
                    B, C, T, F = x.shape
         | 
| 1078 | 
            +
                    target_T = int(self.spec_size * self.freq_ratio)
         | 
| 1079 | 
            +
                    target_F = self.spec_size // self.freq_ratio
         | 
| 1080 | 
            +
                    assert (
         | 
| 1081 | 
            +
                        T <= target_T and F <= target_F
         | 
| 1082 | 
            +
                    ), "the wav size should less than or equal to the swin input size"
         | 
| 1083 | 
            +
                    # to avoid bicubic zero error
         | 
| 1084 | 
            +
                    if T < target_T:
         | 
| 1085 | 
            +
                        x = nn.functional.interpolate(
         | 
| 1086 | 
            +
                            x, (target_T, x.shape[3]), mode="bicubic", align_corners=True
         | 
| 1087 | 
            +
                        )
         | 
| 1088 | 
            +
                    if F < target_F:
         | 
| 1089 | 
            +
                        x = nn.functional.interpolate(
         | 
| 1090 | 
            +
                            x, (x.shape[2], target_F), mode="bicubic", align_corners=True
         | 
| 1091 | 
            +
                        )
         | 
| 1092 | 
            +
                    x = x.permute(0, 1, 3, 2).contiguous()
         | 
| 1093 | 
            +
                    x = x.reshape(
         | 
| 1094 | 
            +
                        x.shape[0],
         | 
| 1095 | 
            +
                        x.shape[1],
         | 
| 1096 | 
            +
                        x.shape[2],
         | 
| 1097 | 
            +
                        self.freq_ratio,
         | 
| 1098 | 
            +
                        x.shape[3] // self.freq_ratio,
         | 
| 1099 | 
            +
                    )
         | 
| 1100 | 
            +
                    # print(x.shape)
         | 
| 1101 | 
            +
                    x = x.permute(0, 1, 3, 2, 4).contiguous()
         | 
| 1102 | 
            +
                    x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3], x.shape[4])
         | 
| 1103 | 
            +
                    return x
         | 
| 1104 | 
            +
             | 
| 1105 | 
            +
                # Repeat the wavform to a img size, if you want to use the pretrained swin transformer model
         | 
| 1106 | 
            +
                def repeat_wat2img(self, x, cur_pos):
         | 
| 1107 | 
            +
                    B, C, T, F = x.shape
         | 
| 1108 | 
            +
                    target_T = int(self.spec_size * self.freq_ratio)
         | 
| 1109 | 
            +
                    target_F = self.spec_size // self.freq_ratio
         | 
| 1110 | 
            +
                    assert (
         | 
| 1111 | 
            +
                        T <= target_T and F <= target_F
         | 
| 1112 | 
            +
                    ), "the wav size should less than or equal to the swin input size"
         | 
| 1113 | 
            +
                    # to avoid bicubic zero error
         | 
| 1114 | 
            +
                    if T < target_T:
         | 
| 1115 | 
            +
                        x = nn.functional.interpolate(
         | 
| 1116 | 
            +
                            x, (target_T, x.shape[3]), mode="bicubic", align_corners=True
         | 
| 1117 | 
            +
                        )
         | 
| 1118 | 
            +
                    if F < target_F:
         | 
| 1119 | 
            +
                        x = nn.functional.interpolate(
         | 
| 1120 | 
            +
                            x, (x.shape[2], target_F), mode="bicubic", align_corners=True
         | 
| 1121 | 
            +
                        )
         | 
| 1122 | 
            +
                    x = x.permute(0, 1, 3, 2).contiguous()  # B C F T
         | 
| 1123 | 
            +
                    x = x[:, :, :, cur_pos : cur_pos + self.spec_size]
         | 
| 1124 | 
            +
                    x = x.repeat(repeats=(1, 1, 4, 1))
         | 
| 1125 | 
            +
                    return x
         | 
| 1126 | 
            +
             | 
| 1127 | 
            +
                def forward(
         | 
| 1128 | 
            +
                    self, x: torch.Tensor, mixup_lambda=None, infer_mode=False, device=None
         | 
| 1129 | 
            +
                ):  # out_feat_keys: List[str] = None):
         | 
| 1130 | 
            +
             | 
| 1131 | 
            +
                    if self.enable_fusion and x["longer"].sum() == 0:
         | 
| 1132 | 
            +
                        # if no audio is longer than 10s, then randomly select one audio to be longer
         | 
| 1133 | 
            +
                        x["longer"][torch.randint(0, x["longer"].shape[0], (1,))] = True
         | 
| 1134 | 
            +
             | 
| 1135 | 
            +
                    if not self.enable_fusion:
         | 
| 1136 | 
            +
                        x = x["waveform"].to(device=device, non_blocking=True)
         | 
| 1137 | 
            +
                        x = self.spectrogram_extractor(x)  # (batch_size, 1, time_steps, freq_bins)
         | 
| 1138 | 
            +
                        x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
         | 
| 1139 | 
            +
                        x = x.transpose(1, 3)
         | 
| 1140 | 
            +
                        x = self.bn0(x)
         | 
| 1141 | 
            +
                        x = x.transpose(1, 3)
         | 
| 1142 | 
            +
                        if self.training:
         | 
| 1143 | 
            +
                            x = self.spec_augmenter(x)
         | 
| 1144 | 
            +
             | 
| 1145 | 
            +
                        if self.training and mixup_lambda is not None:
         | 
| 1146 | 
            +
                            x = do_mixup(x, mixup_lambda)
         | 
| 1147 | 
            +
             | 
| 1148 | 
            +
                        x = self.reshape_wav2img(x)
         | 
| 1149 | 
            +
                        output_dict = self.forward_features(x)
         | 
| 1150 | 
            +
                    else:
         | 
| 1151 | 
            +
                        longer_list = x["longer"].to(device=device, non_blocking=True)
         | 
| 1152 | 
            +
                        x = x["mel_fusion"].to(device=device, non_blocking=True)
         | 
| 1153 | 
            +
                        x = x.transpose(1, 3)
         | 
| 1154 | 
            +
                        x = self.bn0(x)
         | 
| 1155 | 
            +
                        x = x.transpose(1, 3)
         | 
| 1156 | 
            +
                        longer_list_idx = torch.where(longer_list)[0]
         | 
| 1157 | 
            +
                        if self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]:
         | 
| 1158 | 
            +
                            new_x = x[:, 0:1, :, :].clone().contiguous()
         | 
| 1159 | 
            +
                            if len(longer_list_idx) > 0:
         | 
| 1160 | 
            +
                                # local processing
         | 
| 1161 | 
            +
                                fusion_x_local = x[longer_list_idx, 1:, :, :].clone().contiguous()
         | 
| 1162 | 
            +
                                FB, FC, FT, FF = fusion_x_local.size()
         | 
| 1163 | 
            +
                                fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
         | 
| 1164 | 
            +
                                fusion_x_local = torch.permute(
         | 
| 1165 | 
            +
                                    fusion_x_local, (0, 2, 1)
         | 
| 1166 | 
            +
                                ).contiguous()
         | 
| 1167 | 
            +
                                fusion_x_local = self.mel_conv1d(fusion_x_local)
         | 
| 1168 | 
            +
                                fusion_x_local = fusion_x_local.view(
         | 
| 1169 | 
            +
                                    FB, FC, FF, fusion_x_local.size(-1)
         | 
| 1170 | 
            +
                                )
         | 
| 1171 | 
            +
                                fusion_x_local = (
         | 
| 1172 | 
            +
                                    torch.permute(fusion_x_local, (0, 2, 1, 3))
         | 
| 1173 | 
            +
                                    .contiguous()
         | 
| 1174 | 
            +
                                    .flatten(2)
         | 
| 1175 | 
            +
                                )
         | 
| 1176 | 
            +
                                if fusion_x_local.size(-1) < FT:
         | 
| 1177 | 
            +
                                    fusion_x_local = torch.cat(
         | 
| 1178 | 
            +
                                        [
         | 
| 1179 | 
            +
                                            fusion_x_local,
         | 
| 1180 | 
            +
                                            torch.zeros(
         | 
| 1181 | 
            +
                                                (FB, FF, FT - fusion_x_local.size(-1)),
         | 
| 1182 | 
            +
                                                device=device,
         | 
| 1183 | 
            +
                                            ),
         | 
| 1184 | 
            +
                                        ],
         | 
| 1185 | 
            +
                                        dim=-1,
         | 
| 1186 | 
            +
                                    )
         | 
| 1187 | 
            +
                                else:
         | 
| 1188 | 
            +
                                    fusion_x_local = fusion_x_local[:, :, :FT]
         | 
| 1189 | 
            +
                                # 1D fusion
         | 
| 1190 | 
            +
                                new_x = new_x.squeeze(1).permute((0, 2, 1)).contiguous()
         | 
| 1191 | 
            +
                                new_x[longer_list_idx] = self.fusion_model(
         | 
| 1192 | 
            +
                                    new_x[longer_list_idx], fusion_x_local
         | 
| 1193 | 
            +
                                )
         | 
| 1194 | 
            +
                                x = new_x.permute((0, 2, 1)).contiguous()[:, None, :, :]
         | 
| 1195 | 
            +
                            else:
         | 
| 1196 | 
            +
                                x = new_x
         | 
| 1197 | 
            +
             | 
| 1198 | 
            +
                        elif self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d", "channel_map"]:
         | 
| 1199 | 
            +
                            x = x  # no change
         | 
| 1200 | 
            +
             | 
| 1201 | 
            +
                        if self.training:
         | 
| 1202 | 
            +
                            x = self.spec_augmenter(x)
         | 
| 1203 | 
            +
                        if self.training and mixup_lambda is not None:
         | 
| 1204 | 
            +
                            x = do_mixup(x, mixup_lambda)
         | 
| 1205 | 
            +
             | 
| 1206 | 
            +
                        x = self.reshape_wav2img(x)
         | 
| 1207 | 
            +
                        output_dict = self.forward_features(x, longer_idx=longer_list_idx)
         | 
| 1208 | 
            +
             | 
| 1209 | 
            +
                    # if infer_mode:
         | 
| 1210 | 
            +
                    #     # in infer mode. we need to handle different length audio input
         | 
| 1211 | 
            +
                    #     frame_num = x.shape[2]
         | 
| 1212 | 
            +
                    #     target_T = int(self.spec_size * self.freq_ratio)
         | 
| 1213 | 
            +
                    #     repeat_ratio = math.floor(target_T / frame_num)
         | 
| 1214 | 
            +
                    #     x = x.repeat(repeats=(1,1,repeat_ratio,1))
         | 
| 1215 | 
            +
                    #     x = self.reshape_wav2img(x)
         | 
| 1216 | 
            +
                    #     output_dict = self.forward_features(x)
         | 
| 1217 | 
            +
                    # else:
         | 
| 1218 | 
            +
                    #     if x.shape[2] > self.freq_ratio * self.spec_size:
         | 
| 1219 | 
            +
                    #         if self.training:
         | 
| 1220 | 
            +
                    #             x = self.crop_wav(x, crop_size=self.freq_ratio * self.spec_size)
         | 
| 1221 | 
            +
                    #             x = self.reshape_wav2img(x)
         | 
| 1222 | 
            +
                    #             output_dict = self.forward_features(x)
         | 
| 1223 | 
            +
                    #         else:
         | 
| 1224 | 
            +
                    #             # Change: Hard code here
         | 
| 1225 | 
            +
                    #             overlap_size = (x.shape[2] - 1) // 4
         | 
| 1226 | 
            +
                    #             output_dicts = []
         | 
| 1227 | 
            +
                    #             crop_size = (x.shape[2] - 1) // 2
         | 
| 1228 | 
            +
                    #             for cur_pos in range(0, x.shape[2] - crop_size - 1, overlap_size):
         | 
| 1229 | 
            +
                    #                 tx = self.crop_wav(x, crop_size = crop_size, spe_pos = cur_pos)
         | 
| 1230 | 
            +
                    #                 tx = self.reshape_wav2img(tx)
         | 
| 1231 | 
            +
                    #                 output_dicts.append(self.forward_features(tx))
         | 
| 1232 | 
            +
                    #             clipwise_output = torch.zeros_like(output_dicts[0]["clipwise_output"]).float().to(x.device)
         | 
| 1233 | 
            +
                    #             framewise_output = torch.zeros_like(output_dicts[0]["framewise_output"]).float().to(x.device)
         | 
| 1234 | 
            +
                    #             for d in output_dicts:
         | 
| 1235 | 
            +
                    #                 clipwise_output += d["clipwise_output"]
         | 
| 1236 | 
            +
                    #                 framewise_output += d["framewise_output"]
         | 
| 1237 | 
            +
                    #             clipwise_output  = clipwise_output / len(output_dicts)
         | 
| 1238 | 
            +
                    #             framewise_output = framewise_output / len(output_dicts)
         | 
| 1239 | 
            +
                    #             output_dict = {
         | 
| 1240 | 
            +
                    #                 'framewise_output': framewise_output,
         | 
| 1241 | 
            +
                    #                 'clipwise_output': clipwise_output
         | 
| 1242 | 
            +
                    #             }
         | 
| 1243 | 
            +
                    #     else: # this part is typically used, and most easy one
         | 
| 1244 | 
            +
                    #         x = self.reshape_wav2img(x)
         | 
| 1245 | 
            +
                    #         output_dict = self.forward_features(x)
         | 
| 1246 | 
            +
                    # x = self.head(x)
         | 
| 1247 | 
            +
             | 
| 1248 | 
            +
                    # We process the data in the dataloader part, in that here we only consider the input_T < fixed_T
         | 
| 1249 | 
            +
             | 
| 1250 | 
            +
                    return output_dict
         | 
| 1251 | 
            +
             | 
| 1252 | 
            +
             | 
| 1253 | 
            +
            def create_htsat_model(audio_cfg, enable_fusion=False, fusion_type="None"):
         | 
| 1254 | 
            +
                try:
         | 
| 1255 | 
            +
             | 
| 1256 | 
            +
                    assert audio_cfg.model_name in [
         | 
| 1257 | 
            +
                        "tiny",
         | 
| 1258 | 
            +
                        "base",
         | 
| 1259 | 
            +
                        "large",
         | 
| 1260 | 
            +
                    ], "model name for HTS-AT is wrong!"
         | 
| 1261 | 
            +
                    if audio_cfg.model_name == "tiny":
         | 
| 1262 | 
            +
                        model = HTSAT_Swin_Transformer(
         | 
| 1263 | 
            +
                            spec_size=256,
         | 
| 1264 | 
            +
                            patch_size=4,
         | 
| 1265 | 
            +
                            patch_stride=(4, 4),
         | 
| 1266 | 
            +
                            num_classes=audio_cfg.class_num,
         | 
| 1267 | 
            +
                            embed_dim=96,
         | 
| 1268 | 
            +
                            depths=[2, 2, 6, 2],
         | 
| 1269 | 
            +
                            num_heads=[4, 8, 16, 32],
         | 
| 1270 | 
            +
                            window_size=8,
         | 
| 1271 | 
            +
                            config=audio_cfg,
         | 
| 1272 | 
            +
                            enable_fusion=enable_fusion,
         | 
| 1273 | 
            +
                            fusion_type=fusion_type,
         | 
| 1274 | 
            +
                        )
         | 
| 1275 | 
            +
                    elif audio_cfg.model_name == "base":
         | 
| 1276 | 
            +
                        model = HTSAT_Swin_Transformer(
         | 
| 1277 | 
            +
                            spec_size=256,
         | 
| 1278 | 
            +
                            patch_size=4,
         | 
| 1279 | 
            +
                            patch_stride=(4, 4),
         | 
| 1280 | 
            +
                            num_classes=audio_cfg.class_num,
         | 
| 1281 | 
            +
                            embed_dim=128,
         | 
| 1282 | 
            +
                            depths=[2, 2, 12, 2],
         | 
| 1283 | 
            +
                            num_heads=[4, 8, 16, 32],
         | 
| 1284 | 
            +
                            window_size=8,
         | 
| 1285 | 
            +
                            config=audio_cfg,
         | 
| 1286 | 
            +
                            enable_fusion=enable_fusion,
         | 
| 1287 | 
            +
                            fusion_type=fusion_type,
         | 
| 1288 | 
            +
                        )
         | 
| 1289 | 
            +
                    elif audio_cfg.model_name == "large":
         | 
| 1290 | 
            +
                        model = HTSAT_Swin_Transformer(
         | 
| 1291 | 
            +
                            spec_size=256,
         | 
| 1292 | 
            +
                            patch_size=4,
         | 
| 1293 | 
            +
                            patch_stride=(4, 4),
         | 
| 1294 | 
            +
                            num_classes=audio_cfg.class_num,
         | 
| 1295 | 
            +
                            embed_dim=256,
         | 
| 1296 | 
            +
                            depths=[2, 2, 12, 2],
         | 
| 1297 | 
            +
                            num_heads=[4, 8, 16, 32],
         | 
| 1298 | 
            +
                            window_size=8,
         | 
| 1299 | 
            +
                            config=audio_cfg,
         | 
| 1300 | 
            +
                            enable_fusion=enable_fusion,
         | 
| 1301 | 
            +
                            fusion_type=fusion_type,
         | 
| 1302 | 
            +
                        )
         | 
| 1303 | 
            +
             | 
| 1304 | 
            +
                    return model
         | 
| 1305 | 
            +
                except:
         | 
| 1306 | 
            +
                    raise RuntimeError(
         | 
| 1307 | 
            +
                        f"Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough."
         | 
| 1308 | 
            +
                    )
         | 
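For orientation, here is a minimal sketch of how `create_htsat_model` is driven. For the branch selection shown above, the `audio_cfg` object only needs `model_name` ("tiny" / "base" / "large") and `class_num`; the real CLAP audio configs (the HTSAT-*.json files under model_configs/) additionally carry the spectrogram front-end parameters consumed by `HTSAT_Swin_Transformer`. The concrete field values below are illustrative assumptions, not values taken from this repository.

from types import SimpleNamespace

# Hypothetical, abridged audio config; a full CLAP config additionally provides
# sample_rate, mel_bins, window_size, hop_size, fmin, fmax, etc.
audio_cfg = SimpleNamespace(
    model_name="tiny",   # -> embed_dim=96, depths=[2, 2, 6, 2] in create_htsat_model
    class_num=527,       # assumed number of tagging classes (e.g. AudioSet)
)

# With a complete config, the model is built and called roughly like this:
# model = create_htsat_model(audio_cfg, enable_fusion=False, fusion_type="None")
# out = model({"waveform": waveform_batch}, device="cuda")
# out["embedding"]        -> clip-level latent later fed to CLAP's audio projection
# out["clipwise_output"]  -> sigmoid class probabilities from the tscam_conv head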
    	
audioldm/clap/open_clip/linear_probe.py
ADDED
@@ -0,0 +1,66 @@
import numpy as np
import torch.nn.functional as F
from torch import nn
from .model import MLPLayers


class LinearProbe(nn.Module):
    def __init__(self, model, mlp, freeze, in_ch, out_ch, act=None):
        """
        Args:
            model: nn.Module
            mlp: bool, if True, then use the MLP layer as the linear probe module
            freeze: bool, if True, then freeze all the CLAP model's layers when training the linear probe
            in_ch: int, the output channel from CLAP model
            out_ch: int, the output channel from linear probe (class_num)
            act: torch.nn.functional, the activation function before the loss function
        """
        super().__init__()
        in_ch = 512
        self.clap_model = model
        self.clap_model.text_branch = None  # to save memory
        self.freeze = freeze
        if mlp:
            self.lp_layer = MLPLayers(units=[in_ch, in_ch * 2, out_ch])
        else:
            self.lp_layer = nn.Linear(in_ch, out_ch)

        if self.freeze:
            for param in self.clap_model.parameters():
                param.requires_grad = False

        if act == "None":
            self.act = None
        elif act == "relu":
            self.act = nn.ReLU()
        elif act == "elu":
            self.act = nn.ELU()
        elif act == "prelu":
            self.act = nn.PReLU(num_parameters=in_ch)
        elif act == "softmax":
            self.act = nn.Softmax(dim=-1)
        elif act == "sigmoid":
            self.act = nn.Sigmoid()

    def forward(self, x, mix_lambda=None, device=None):
        """
        Args:
            x: waveform, torch.tensor [batch, t_samples] / batch of mel_spec and longer list
            mix_lambda: torch.tensor [batch], the mixup lambda
        Returns:
            class_prob: torch.tensor [batch, class_num]

        """
        # keep BatchNorm in eval mode so running statistics are not updated while the backbone is frozen
        if self.freeze:
            self.clap_model.eval()

        x = self.clap_model.audio_projection(
            self.clap_model.audio_branch(x, mixup_lambda=mix_lambda, device=device)[
                "embedding"
            ]
        )
        out = self.lp_layer(x)
        if self.act is not None:
            out = self.act(out)
        return out
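As a usage note, the snippet below exercises `LinearProbe` end to end with a toy stand-in for the CLAP backbone; a real CLAP model (with an HTSAT `audio_branch` and a 512-d `audio_projection`) would be built elsewhere in this package. The 768-d toy embedding and the class count are assumptions made only so the sketch runs self-contained.

import torch
from torch import nn

class _ToyAudioBranch(nn.Module):
    # Mimics the HTSAT output contract used by LinearProbe: a dict with an "embedding" key.
    def forward(self, x, mixup_lambda=None, device=None):
        wav = x["waveform"]                                  # [batch, samples]
        return {"embedding": wav.mean(dim=-1, keepdim=True).repeat(1, 768)}

class _ToyCLAP(nn.Module):
    # Minimal stand-in exposing the attributes LinearProbe touches.
    def __init__(self):
        super().__init__()
        self.audio_branch = _ToyAudioBranch()
        self.audio_projection = nn.Linear(768, 512)          # assumed projection width
        self.text_branch = nn.Identity()                     # discarded by LinearProbe anyway

probe = LinearProbe(_ToyCLAP(), mlp=False, freeze=True, in_ch=512, out_ch=50, act="None")
logits = probe({"waveform": torch.randn(4, 48000)}, device="cpu")
print(logits.shape)                                          # torch.Size([4, 50])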
    	
audioldm/clap/open_clip/loss.py
ADDED
@@ -0,0 +1,398 @@
from multiprocessing.sharedctypes import Value
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
import numpy as np
from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score

try:
    import horovod.torch as hvd
except ImportError:
    hvd = None


def gather_features(
    audio_features,
    text_features,
    audio_features_mlp=None,
    text_features_mlp=None,
    local_loss=False,
    gather_with_grad=False,
    rank=0,
    world_size=1,
    use_horovod=False,
    mlp_loss=False,
):
    if use_horovod:
        assert hvd is not None, "Please install horovod"
        if gather_with_grad:
            all_audio_features = hvd.allgather(audio_features)
            all_text_features = hvd.allgather(text_features)
            if mlp_loss:
                all_audio_features_mlp = hvd.allgather(audio_features_mlp)
                all_text_features_mlp = hvd.allgather(text_features_mlp)
        else:
            with torch.no_grad():
                all_audio_features = hvd.allgather(audio_features)
                all_text_features = hvd.allgather(text_features)
                if mlp_loss:
                    all_audio_features_mlp = hvd.allgather(audio_features_mlp)
                    all_text_features_mlp = hvd.allgather(text_features_mlp)
            if not local_loss:
                # ensure grads for local rank when all_* features don't have a gradient
                gathered_audio_features = list(
                    all_audio_features.chunk(world_size, dim=0)
                )
                gathered_text_features = list(
                    all_text_features.chunk(world_size, dim=0)
                )
                gathered_audio_features[rank] = audio_features
                gathered_text_features[rank] = text_features
                all_audio_features = torch.cat(gathered_audio_features, dim=0)
                all_text_features = torch.cat(gathered_text_features, dim=0)
                if mlp_loss:
                    gathered_audio_features_mlp = list(
                        all_audio_features_mlp.chunk(world_size, dim=0)
                    )
                    gathered_text_features_mlp = list(
                        all_text_features_mlp.chunk(world_size, dim=0)
                    )
                    gathered_audio_features_mlp[rank] = audio_features_mlp
                    gathered_text_features_mlp[rank] = text_features_mlp
                    all_audio_features_mlp = torch.cat(
                        gathered_audio_features_mlp, dim=0
                    )
                    all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
    else:
        # We gather tensors from all gpus
        if gather_with_grad:
            all_audio_features = torch.cat(
                torch.distributed.nn.all_gather(audio_features), dim=0
            )
            all_text_features = torch.cat(
                torch.distributed.nn.all_gather(text_features), dim=0
            )
            if mlp_loss:
                all_audio_features_mlp = torch.cat(
                    torch.distributed.nn.all_gather(audio_features_mlp), dim=0
                )
                all_text_features_mlp = torch.cat(
                    torch.distributed.nn.all_gather(text_features_mlp), dim=0
                )
        else:
            gathered_audio_features = [
                torch.zeros_like(audio_features) for _ in range(world_size)
            ]
            gathered_text_features = [
                torch.zeros_like(text_features) for _ in range(world_size)
            ]
            dist.all_gather(gathered_audio_features, audio_features)
            dist.all_gather(gathered_text_features, text_features)
            if mlp_loss:
                gathered_audio_features_mlp = [
                    torch.zeros_like(audio_features_mlp) for _ in range(world_size)
                ]
                gathered_text_features_mlp = [
                    torch.zeros_like(text_features_mlp) for _ in range(world_size)
                ]
                dist.all_gather(gathered_audio_features_mlp, audio_features_mlp)
                dist.all_gather(gathered_text_features_mlp, text_features_mlp)
            if not local_loss:
                # ensure grads for local rank when all_* features don't have a gradient
                gathered_audio_features[rank] = audio_features
                gathered_text_features[rank] = text_features
                if mlp_loss:
                    gathered_audio_features_mlp[rank] = audio_features_mlp
                    gathered_text_features_mlp[rank] = text_features_mlp

            all_audio_features = torch.cat(gathered_audio_features, dim=0)
            all_text_features = torch.cat(gathered_text_features, dim=0)
            if mlp_loss:
                all_audio_features_mlp = torch.cat(gathered_audio_features_mlp, dim=0)
                all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
    if mlp_loss:
        return (
            all_audio_features,
            all_text_features,
            all_audio_features_mlp,
            all_text_features_mlp,
        )
    else:
        return all_audio_features, all_text_features


class ClipLoss(nn.Module):
    def __init__(
        self,
        local_loss=False,
        gather_with_grad=False,
        cache_labels=False,
        rank=0,
        world_size=1,
        use_horovod=False,
        mlp_loss=False,
        weight_loss_kappa=0,
    ):
        super().__init__()
        self.local_loss = local_loss
        self.gather_with_grad = gather_with_grad
        self.cache_labels = cache_labels
        self.rank = rank
        self.world_size = world_size
        self.use_horovod = use_horovod
        self.mlp_loss = mlp_loss
        self.weighted_loss = bool(weight_loss_kappa != 0)
        self.weight_loss_kappa = weight_loss_kappa
        # cache state
        self.prev_num_logits = 0
        self.labels = {}

    def forward(
        self,
        audio_features,
        text_features,
        logit_scale_a,
        logit_scale_t=None,
        audio_features_mlp=None,
        text_features_mlp=None,
    ):
        device = audio_features.device
        if self.mlp_loss:
            if self.world_size > 1:
                (
                    all_audio_features,
                    all_text_features,
                    all_audio_features_mlp,
                    all_text_features_mlp,
                ) = gather_features(
                    audio_features=audio_features,
                    text_features=text_features,
                    audio_features_mlp=audio_features_mlp,
                    text_features_mlp=text_features_mlp,
                    local_loss=self.local_loss,
                    gather_with_grad=self.gather_with_grad,
                    rank=self.rank,
                    world_size=self.world_size,
                    use_horovod=self.use_horovod,
                    mlp_loss=self.mlp_loss,
                )
                if self.local_loss:
                    a_logits_per_audio = (
                        logit_scale_a * audio_features @ all_text_features_mlp.T
                    )
                    a_logits_per_text = (
                        logit_scale_a * text_features_mlp @ all_audio_features.T
                    )
                    t_logits_per_audio = (
                        logit_scale_t * audio_features_mlp @ all_text_features.T
                    )
                    t_logits_per_text = (
                        logit_scale_t * text_features @ all_audio_features_mlp.T
                    )
                else:
                    a_logits_per_audio = (
                        logit_scale_a * all_audio_features @ all_text_features_mlp.T
                    )
                    a_logits_per_text = a_logits_per_audio.T
                    t_logits_per_audio = (
                        logit_scale_t * all_audio_features_mlp @ all_text_features.T
                    )
                    t_logits_per_text = t_logits_per_audio.T
            else:
                a_logits_per_audio = (
                    logit_scale_a * audio_features @ text_features_mlp.T
                )
                a_logits_per_text = logit_scale_a * text_features_mlp @ audio_features.T
                t_logits_per_audio = (
                    logit_scale_t * audio_features_mlp @ text_features.T
                )
                t_logits_per_text = logit_scale_t * text_features @ audio_features_mlp.T

            # calculated ground-truth and cache if enabled
            num_logits = a_logits_per_audio.shape[0]
            if self.prev_num_logits != num_logits or device not in self.labels:
                labels = torch.arange(num_logits, device=device, dtype=torch.long)
                if self.world_size > 1 and self.local_loss:
                    labels = labels + num_logits * self.rank
                if self.cache_labels:
                    self.labels[device] = labels
                    self.prev_num_logits = num_logits
            else:
                labels = self.labels[device]

            if not self.weighted_loss:
                total_loss = (
                    F.cross_entropy(a_logits_per_audio, labels)
                    + F.cross_entropy(a_logits_per_text, labels)
                    + F.cross_entropy(t_logits_per_audio, labels)
                    + F.cross_entropy(t_logits_per_text, labels)
                ) / 4
            else:
                audio_weight = (audio_features @ audio_features.T).detach()
                audio_weight = (
                    torch.exp(
                        torch.sum(audio_weight, axis=1)
                        / (self.weight_loss_kappa * len(audio_weight))
                    )
                ).detach()
                text_weight = (text_features @ text_features.T).detach()
                text_weight = (
                    torch.exp(
                        torch.sum(text_weight, axis=1)
                        / (self.weight_loss_kappa * len(text_features))
                    )
                ).detach()
                total_loss = (
                    F.cross_entropy(a_logits_per_audio, labels, weight=audio_weight)
                    + F.cross_entropy(a_logits_per_text, labels, weight=audio_weight)
                    + F.cross_entropy(t_logits_per_audio, labels, weight=text_weight)
                    + F.cross_entropy(t_logits_per_text, labels, weight=text_weight)
                ) / 4
        else:
            if self.world_size > 1:
                all_audio_features, all_text_features = gather_features(
                    audio_features=audio_features,
                    text_features=text_features,
                    local_loss=self.local_loss,
                    gather_with_grad=self.gather_with_grad,
                    rank=self.rank,
                    world_size=self.world_size,
                    use_horovod=self.use_horovod,
                    mlp_loss=self.mlp_loss,
                )

                if self.local_loss:
                    logits_per_audio = (
                        logit_scale_a * audio_features @ all_text_features.T
                    )
                    logits_per_text = (
                        logit_scale_a * text_features @ all_audio_features.T
                    )
                else:
                    logits_per_audio = (
                        logit_scale_a * all_audio_features @ all_text_features.T
         | 
| 275 | 
            +
                                )
         | 
| 276 | 
            +
                                logits_per_text = logits_per_audio.T
         | 
| 277 | 
            +
                        else:
         | 
| 278 | 
            +
                            logits_per_audio = logit_scale_a * audio_features @ text_features.T
         | 
| 279 | 
            +
                            logits_per_text = logit_scale_a * text_features @ audio_features.T
         | 
| 280 | 
            +
             | 
| 281 | 
            +
                        # calculated ground-truth and cache if enabled
         | 
| 282 | 
            +
                        num_logits = logits_per_audio.shape[0]
         | 
| 283 | 
            +
                        if self.prev_num_logits != num_logits or device not in self.labels:
         | 
| 284 | 
            +
                            labels = torch.arange(num_logits, device=device, dtype=torch.long)
         | 
| 285 | 
            +
                            if self.world_size > 1 and self.local_loss:
         | 
| 286 | 
            +
                                labels = labels + num_logits * self.rank
         | 
| 287 | 
            +
                            if self.cache_labels:
         | 
| 288 | 
            +
                                self.labels[device] = labels
         | 
| 289 | 
            +
                                self.prev_num_logits = num_logits
         | 
| 290 | 
            +
                        else:
         | 
| 291 | 
            +
                            labels = self.labels[device]
         | 
| 292 | 
            +
                        if not self.weighted_loss:
         | 
| 293 | 
            +
                            total_loss = (
         | 
| 294 | 
            +
                                F.cross_entropy(logits_per_audio, labels)
         | 
| 295 | 
            +
                                + F.cross_entropy(logits_per_text, labels)
         | 
| 296 | 
            +
                            ) / 2
         | 
| 297 | 
            +
                        else:
         | 
| 298 | 
            +
                            audio_weight = (all_audio_features @ all_audio_features.T).detach()
         | 
| 299 | 
            +
                            audio_weight = (
         | 
| 300 | 
            +
                                torch.exp(
         | 
| 301 | 
            +
                                    torch.sum(audio_weight, axis=1)
         | 
| 302 | 
            +
                                    / (self.weight_loss_kappa * len(all_audio_features))
         | 
| 303 | 
            +
                                )
         | 
| 304 | 
            +
                            ).detach()
         | 
| 305 | 
            +
                            text_weight = (all_text_features @ all_text_features.T).detach()
         | 
| 306 | 
            +
                            text_weight = (
         | 
| 307 | 
            +
                                torch.exp(
         | 
| 308 | 
            +
                                    torch.sum(text_weight, axis=1)
         | 
| 309 | 
            +
                                    / (self.weight_loss_kappa * len(all_text_features))
         | 
| 310 | 
            +
                                )
         | 
| 311 | 
            +
                            ).detach()
         | 
| 312 | 
            +
                            total_loss = (
         | 
| 313 | 
            +
                                F.cross_entropy(logits_per_audio, labels, weight=text_weight)
         | 
| 314 | 
            +
                                + F.cross_entropy(logits_per_text, labels, weight=audio_weight)
         | 
| 315 | 
            +
                            ) / 2
         | 
| 316 | 
            +
                    return total_loss
         | 
| 317 | 
            +
             | 
| 318 | 
            +
             | 
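Illustrative sketch (not part of the uploaded file): in the single-process, unweighted case the loss above reduces to a symmetric cross-entropy over the audio-text similarity matrix, with matching pairs on the diagonal. The feature sizes and scale value below are arbitrary placeholders.

import torch
import torch.nn.functional as F

batch, dim = 4, 512
audio_features = F.normalize(torch.randn(batch, dim), dim=-1)
text_features = F.normalize(torch.randn(batch, dim), dim=-1)
logit_scale_a = torch.tensor(100.0)  # stands in for the model's learned scale

logits_per_audio = logit_scale_a * audio_features @ text_features.T
logits_per_text = logit_scale_a * text_features @ audio_features.T
labels = torch.arange(batch)  # ground truth: the i-th clip matches the i-th caption

total_loss = (
    F.cross_entropy(logits_per_audio, labels)
    + F.cross_entropy(logits_per_text, labels)
) / 2
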
def lp_gather_features(pred, target, world_size=1, use_horovod=False):
    if use_horovod:
        assert hvd is not None, "Please install horovod"
        with torch.no_grad():
            all_preds = hvd.allgather(pred)
            all_targets = hvd.allgather(target)
    else:
        gathered_preds = [torch.zeros_like(pred) for _ in range(world_size)]
        gathered_targets = [torch.zeros_like(target) for _ in range(world_size)]

        dist.all_gather(gathered_preds, pred)
        dist.all_gather(gathered_targets, target)
        all_preds = torch.cat(gathered_preds, dim=0)
        all_targets = torch.cat(gathered_targets, dim=0)

    return all_preds, all_targets

def get_map(pred, target):
    pred = torch.sigmoid(pred).numpy()
    target = target.numpy()
    return np.mean(average_precision_score(target, pred, average=None))


def get_acc(pred, target):
    pred = torch.argmax(pred, 1).numpy()
    target = torch.argmax(target, 1).numpy()
    return accuracy_score(target, pred)


def get_mauc(pred, target):
    pred = torch.sigmoid(pred).numpy()
    target = target.numpy()
    return np.mean(roc_auc_score(target, pred, average=None))

class LPMetrics(object):
    def __init__(self, metric_names=["map", "acc", "mauc"]):
        self.metrics = []
        for name in metric_names:
            self.metrics.append(self.get_metric(name))
        self.metric_names = metric_names

    def get_metric(self, name):
        if name == "map":
            return get_map
        elif name == "acc":
            return get_acc
        elif name == "mauc":
            return get_mauc
        else:
            raise ValueError(f"the metric should be at least one of [map, acc, mauc]")

    def evaluate_mertics(self, pred, target):
        metric_dict = {}
        for i in range(len(self.metric_names)):
            metric_dict[self.metric_names[i]] = self.metrics[i](pred, target)
        return metric_dict

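Illustrative usage (not part of the uploaded file; assumes LPMetrics and the sklearn-backed metric helpers above are in scope): scoring dummy linear-probe logits against one-hot targets. Every class below has both positive and negative examples so average precision and ROC-AUC are well defined. Note the method is spelled evaluate_mertics in this file.

import torch

pred = torch.randn(6, 3)              # linear-probe logits
target = torch.eye(3).repeat(2, 1)    # one-hot targets, two examples per class

lp_metrics = LPMetrics(metric_names=["map", "acc", "mauc"])
scores = lp_metrics.evaluate_mertics(pred, target)  # {'map': ..., 'acc': ..., 'mauc': ...}
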
def calc_celoss(pred, target):
    target = torch.argmax(target, 1).long()
    return nn.CrossEntropyLoss()(pred, target)


class LPLoss(nn.Module):
    def __init__(self, loss_name):
        super().__init__()
        if loss_name == "bce":
            self.loss_func = nn.BCEWithLogitsLoss()
        elif loss_name == "ce":
            self.loss_func = calc_celoss
        elif loss_name == "mse":
            self.loss_func = nn.MSELoss()
        else:
            raise ValueError(f"the loss func should be at least one of [bce, ce, mse]")

    def forward(self, pred, target):
        loss = self.loss_func(pred, target)
        return loss
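Illustrative usage (not part of the uploaded file; assumes the classes above are in scope): the three loss modes expect different target encodings, multi-hot for "bce", one-hot (argmax-ed internally) for "ce", and real-valued targets for "mse".

import torch

logits = torch.randn(4, 10)

bce = LPLoss("bce")(logits, (torch.rand(4, 10) > 0.5).float())        # multi-label
ce = LPLoss("ce")(logits, torch.eye(10)[torch.randint(0, 10, (4,))])  # one-hot single-label
mse = LPLoss("mse")(logits, torch.randn(4, 10))                       # regression-style
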
    	
audioldm/clap/open_clip/model.py
ADDED
@@ -0,0 +1,936 @@
""" CLAP Model

Adapted from CLIP: https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
Adapted to the Audio Task.
"""

from collections import OrderedDict
from dataclasses import dataclass
from email.mime import audio
from typing import Tuple, Union, Callable, Optional

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn

from .timm_model import TimmModel
import logging
from .utils import freeze_batch_norm_2d

from .pann_model import create_pann_model
from .htsat import create_htsat_model
from transformers import BertModel, RobertaModel, BartModel
from transformers.tokenization_utils_base import BatchEncoding


class MLPLayers(nn.Module):
    def __init__(self, units=[512, 512, 512], nonlin=nn.ReLU(), dropout=0.1):
        super(MLPLayers, self).__init__()
        self.nonlin = nonlin
        self.dropout = dropout

        sequence = []
        for u0, u1 in zip(units[:-1], units[1:]):
            sequence.append(nn.Linear(u0, u1))
            sequence.append(self.nonlin)
            sequence.append(nn.Dropout(self.dropout))
        sequence = sequence[:-2]

        self.sequential = nn.Sequential(*sequence)

    def forward(self, X):
        X = self.sequential(X)
        return X

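Illustrative sketch (not part of the uploaded file; assumes the class above is in scope): MLPLayers builds a Linear/activation/Dropout triple per consecutive pair of unit sizes and then strips the trailing activation and Dropout, so the default configuration is Linear-ReLU-Dropout-Linear.

import torch

mlp = MLPLayers(units=[512, 512, 512], dropout=0.1)
x = torch.randn(4, 512)
y = mlp(x)   # shape: (4, 512)
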
class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)

        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()

        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride

        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(
                OrderedDict(
                    [
                        ("-1", nn.AvgPool2d(stride)),
                        (
                            "0",
                            nn.Conv2d(
                                inplanes,
                                planes * self.expansion,
                                1,
                                stride=1,
                                bias=False,
                            ),
                        ),
                        ("1", nn.BatchNorm2d(planes * self.expansion)),
                    ]
                )
            )

    def forward(self, x: torch.Tensor):
        identity = x

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)
        return out

class AttentionPool2d(nn.Module):
    def __init__(
        self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
    ):
        super().__init__()
        self.positional_embedding = nn.Parameter(
            torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
        )
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
            2, 0, 1
        )  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        x, _ = F.multi_head_attention_forward(
            query=x,
            key=x,
            value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat(
                [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
            ),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False,
        )

        return x[0]

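Illustrative sketch (not part of the uploaded file; assumes the class above is in scope, and the shapes are example values): AttentionPool2d flattens an NCHW feature map to (HW, N, C), prepends the spatial mean as an extra token, and returns the attention output at that token, one pooled vector per example.

import torch

pool = AttentionPool2d(spacial_dim=7, embed_dim=2048, num_heads=32, output_dim=1024)
feature_map = torch.randn(2, 2048, 7, 7)   # e.g. the final ResNet feature map
pooled = pool(feature_map)                 # shape: (2, 1024)
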
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, image_size=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.image_size = image_size

        # the 3-layer stem
        self.conv1 = nn.Conv2d(
            3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(
            width // 2, width // 2, kernel_size=3, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)

        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)

        self.init_parameters()

    def _make_layer(self, planes, blocks, stride=1):
        layers = [Bottleneck(self._inplanes, planes, stride)]

        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))

        return nn.Sequential(*layers)

    def init_parameters(self):
        if self.attnpool is not None:
            std = self.attnpool.c_proj.in_features**-0.5
            nn.init.normal_(self.attnpool.q_proj.weight, std=std)
            nn.init.normal_(self.attnpool.k_proj.weight, std=std)
            nn.init.normal_(self.attnpool.v_proj.weight, std=std)
            nn.init.normal_(self.attnpool.c_proj.weight, std=std)

        for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
            for name, param in resnet_block.named_parameters():
                if name.endswith("bn3.weight"):
                    nn.init.zeros_(param)

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        assert (
            unlocked_groups == 0
        ), "partial locking not currently supported for this model"
        for param in self.parameters():
            param.requires_grad = False
        if freeze_bn_stats:
            freeze_batch_norm_2d(self)

    def stem(self, x):
        for conv, bn in [
            (self.conv1, self.bn1),
            (self.conv2, self.bn2),
            (self.conv3, self.bn3),
        ]:
            x = self.relu(bn(conv(x)))
        x = self.avgpool(x)
        return x

    def forward(self, x):
        x = self.stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)

        return x

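Illustrative sketch (not part of the uploaded file; assumes the classes above are in scope, and the ResNet-50-style block counts are an assumption): with width=64 the attention-pooled feature dimension is width * 32 = 2048 internally, projected to output_dim.

import torch

visual = ModifiedResNet(layers=(3, 4, 6, 3), output_dim=1024, heads=32,
                        image_size=224, width=64)
images = torch.randn(2, 3, 224, 224)
embeddings = visual(images)   # shape: (2, 1024)
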
class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor):
        orig_type = x.dtype
        x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        return x.to(orig_type)


class QuickGELU(nn.Module):
    # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
    def forward(self, x: torch.Tensor):
        return x * torch.sigmoid(1.702 * x)

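Illustrative check (not part of the uploaded file): QuickGELU is the sigmoid approximation x * sigmoid(1.702 * x); it tracks the exact GELU closely but not exactly, which is why the comment above steers new code toward nn.GELU.

import torch
import torch.nn.functional as F

x = torch.linspace(-3, 3, 7)
quick = x * torch.sigmoid(1.702 * x)
exact = F.gelu(x)
max_gap = (quick - exact).abs().max()   # small, nonzero approximation error
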
class ResidualAttentionBlock(nn.Module):
    def __init__(self, d_model: int, n_head: int, act_layer: Callable = nn.GELU):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(
            OrderedDict(
                [
                    ("c_fc", nn.Linear(d_model, d_model * 4)),
                    ("gelu", act_layer()),
                    ("c_proj", nn.Linear(d_model * 4, d_model)),
                ]
            )
        )
        self.ln_2 = LayerNorm(d_model)

    def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
        x = x + self.mlp(self.ln_2(x))
        return x


class Transformer(nn.Module):
    def __init__(
        self, width: int, layers: int, heads: int, act_layer: Callable = nn.GELU
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.resblocks = nn.ModuleList(
            [
                ResidualAttentionBlock(width, heads, act_layer=act_layer)
                for _ in range(layers)
            ]
        )

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        for r in self.resblocks:
            x = r(x, attn_mask=attn_mask)
        return x

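Illustrative sketch (not part of the uploaded file; assumes the classes above are in scope, sizes are placeholders): this Transformer operates on sequence-first (LND) tensors because nn.MultiheadAttention is used without batch_first.

import torch

encoder = Transformer(width=512, layers=2, heads=8)
tokens = torch.randn(77, 4, 512)   # (seq_len, batch, width)
out = encoder(tokens)              # same shape; attn_mask is optional
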
class VisualTransformer(nn.Module):
    def __init__(
        self,
        image_size: int,
        patch_size: int,
        width: int,
        layers: int,
        heads: int,
        output_dim: int,
        act_layer: Callable = nn.GELU,
    ):
        super().__init__()
        self.image_size = image_size
        self.output_dim = output_dim
        self.conv1 = nn.Conv2d(
            in_channels=3,
            out_channels=width,
            kernel_size=patch_size,
            stride=patch_size,
            bias=False,
        )

        scale = width**-0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(
            scale * torch.randn((image_size // patch_size) ** 2 + 1, width)
        )
        self.ln_pre = LayerNorm(width)

        self.text_branch = Transformer(width, layers, heads, act_layer=act_layer)

        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        assert (
            unlocked_groups == 0
        ), "partial locking not currently supported for this model"
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        x = torch.cat(
            [
                self.class_embedding.to(x.dtype)
                + torch.zeros(
                    x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
                ),
                x,
            ],
            dim=1,
        )  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.text_branch(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        x = self.ln_post(x[:, 0, :])

        if self.proj is not None:
            x = x @ self.proj

        return x

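Illustrative sketch (not part of the uploaded file; assumes the classes above are in scope, and a 2-layer stack is used only to keep the example light): the class token is taken after the transformer and projected to output_dim.

import torch

vit = VisualTransformer(image_size=224, patch_size=16, width=768,
                        layers=2, heads=12, output_dim=512)
images = torch.randn(2, 3, 224, 224)
embeddings = vit(images)   # shape: (2, 512)
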
@dataclass
class CLAPVisionCfg:
    layers: Union[Tuple[int, int, int, int], int] = 12
    width: int = 768
    patch_size: int = 16
    image_size: Union[Tuple[int, int], int] = 224
    timm_model_name: str = (
        None  # a valid model name overrides layers, width, patch_size
    )
    timm_model_pretrained: bool = (
        False  # use (imagenet) pretrained weights for named model
    )
    timm_pool: str = (
        "avg"  # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
    )
    timm_proj: str = (
        "linear"  # linear projection for timm model output ('linear', 'mlp', '')
    )


# Audio Config Class
@dataclass
class CLAPAudioCfp:
    model_type: str = "PANN"
    model_name: str = "Cnn14"
    sample_rate: int = 48000
    # Param
    audio_length: int = 1024
    window_size: int = 1024
    hop_size: int = 1024
    fmin: int = 50
    fmax: int = 14000
    class_num: int = 527
    mel_bins: int = 64
    clip_samples: int = 480000


@dataclass
class CLAPTextCfg:
    context_length: int
    vocab_size: int
    width: int
    heads: int
    layers: int
    model_type: str

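Illustrative sketch (not part of the uploaded file; the field values are placeholders, not a shipped configuration): CLAP.__init__ below also accepts plain dicts and converts them with these dataclasses.

audio_cfg = CLAPAudioCfp(model_type="HTSAT", model_name="tiny", sample_rate=48000,
                         audio_length=1024, window_size=1024, hop_size=480,
                         fmin=50, fmax=14000, class_num=527, mel_bins=64,
                         clip_samples=480000)
text_cfg = CLAPTextCfg(context_length=77, vocab_size=49408, width=512,
                       heads=8, layers=12, model_type="transformer")
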
class CLAP(nn.Module):
    def __init__(
        self,
        embed_dim: int,
        audio_cfg: CLAPAudioCfp,
        text_cfg: CLAPTextCfg,
        quick_gelu: bool = False,
        enable_fusion: bool = False,
        fusion_type: str = "None",
        joint_embed_shape: int = 512,
        mlp_act: str = "relu",
    ):
        super().__init__()
        if isinstance(audio_cfg, dict):
            audio_cfg = CLAPAudioCfp(**audio_cfg)
        if isinstance(text_cfg, dict):
            text_cfg = CLAPTextCfg(**text_cfg)

        self.audio_cfg = audio_cfg
        self.text_cfg = text_cfg
        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type
        self.joint_embed_shape = joint_embed_shape
        self.mlp_act = mlp_act

        self.context_length = text_cfg.context_length

        # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
        # memory efficient in recent PyTorch releases (>= 1.10).
        # NOTE: timm models always use native GELU regardless of quick_gelu flag.
        act_layer = QuickGELU if quick_gelu else nn.GELU

        if mlp_act == "relu":
            mlp_act_layer = nn.ReLU()
        elif mlp_act == "gelu":
            mlp_act_layer = nn.GELU()
        else:
            raise NotImplementedError

        # audio branch
        # audio branch parameters
        if audio_cfg.model_type == "PANN":
            self.audio_branch = create_pann_model(audio_cfg, enable_fusion, fusion_type)
        elif audio_cfg.model_type == "HTSAT":
            self.audio_branch = create_htsat_model(
                audio_cfg, enable_fusion, fusion_type
            )
        else:
            logging.error(f"Model config for {audio_cfg.model_type} not found")
            raise RuntimeError(f"Model config for {audio_cfg.model_type} not found.")

        # text branch
        # text branch parameters
        if text_cfg.model_type == "transformer":
            self.text_branch = Transformer(
                width=text_cfg.width,
                layers=text_cfg.layers,
                heads=text_cfg.heads,
                act_layer=act_layer,
            )
            self.vocab_size = text_cfg.vocab_size
            self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)
            self.positional_embedding = nn.Parameter(
                torch.empty(self.context_length, text_cfg.width)
            )
            self.ln_final = LayerNorm(text_cfg.width)
            self.text_transform = MLPLayers(
                units=[
                    self.joint_embed_shape,
                    self.joint_embed_shape,
                    self.joint_embed_shape,
                ],
                dropout=0.1,
            )
            self.text_projection = nn.Sequential(
                nn.Linear(text_cfg.width, self.joint_embed_shape),
                mlp_act_layer,
                nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
            )
        elif text_cfg.model_type == "bert":
            self.text_branch = BertModel.from_pretrained("bert-base-uncased")
            self.text_transform = MLPLayers(
                units=[
                    self.joint_embed_shape,
                    self.joint_embed_shape,
         | 
| 507 | 
            +
                                self.joint_embed_shape,
         | 
| 508 | 
            +
                            ],
         | 
| 509 | 
            +
                            dropout=0.1,
         | 
| 510 | 
            +
                        )
         | 
| 511 | 
            +
                        self.text_projection = nn.Sequential(
         | 
| 512 | 
            +
                            nn.Linear(768, self.joint_embed_shape),
         | 
| 513 | 
            +
                            mlp_act_layer,
         | 
| 514 | 
            +
                            nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
         | 
| 515 | 
            +
                        )
         | 
| 516 | 
            +
                    elif text_cfg.model_type == "roberta":
         | 
| 517 | 
            +
                        self.text_branch = RobertaModel.from_pretrained("roberta-base")
         | 
| 518 | 
            +
                        self.text_transform = MLPLayers(
         | 
| 519 | 
            +
                            units=[
         | 
| 520 | 
            +
                                self.joint_embed_shape,
         | 
| 521 | 
            +
                                self.joint_embed_shape,
         | 
| 522 | 
            +
                                self.joint_embed_shape,
         | 
| 523 | 
            +
                            ],
         | 
| 524 | 
            +
                            dropout=0.1,
         | 
| 525 | 
            +
                        )
         | 
| 526 | 
            +
                        self.text_projection = nn.Sequential(
         | 
| 527 | 
            +
                            nn.Linear(768, self.joint_embed_shape),
         | 
| 528 | 
            +
                            mlp_act_layer,
         | 
| 529 | 
            +
                            nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
         | 
| 530 | 
            +
                        )
         | 
| 531 | 
            +
                    elif text_cfg.model_type == "bart":
         | 
| 532 | 
            +
                        self.text_branch = BartModel.from_pretrained("facebook/bart-base")
         | 
| 533 | 
            +
                        self.text_transform = MLPLayers(
         | 
| 534 | 
            +
                            units=[
         | 
| 535 | 
            +
                                self.joint_embed_shape,
         | 
| 536 | 
            +
                                self.joint_embed_shape,
         | 
| 537 | 
            +
                                self.joint_embed_shape,
         | 
| 538 | 
            +
                            ],
         | 
| 539 | 
            +
                            dropout=0.1,
         | 
| 540 | 
            +
                        )
         | 
| 541 | 
            +
                        self.text_projection = nn.Sequential(
         | 
| 542 | 
            +
                            nn.Linear(768, self.joint_embed_shape),
         | 
| 543 | 
            +
                            mlp_act_layer,
         | 
| 544 | 
            +
                            nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
         | 
| 545 | 
            +
                        )
         | 
| 546 | 
            +
                    else:
         | 
| 547 | 
            +
                        logging.error(f"Model config for {text_cfg.model_type} not found")
         | 
| 548 | 
            +
                        raise RuntimeError(f"Model config for {text_cfg.model_type} not found.")
         | 
| 549 | 
            +
                    self.text_branch_type = text_cfg.model_type
         | 
| 550 | 
            +
                    # text branch parameters
         | 
| 551 | 
            +
             | 
| 552 | 
            +
                    # audio branch parameters
         | 
| 553 | 
            +
                    self.audio_transform = MLPLayers(
         | 
| 554 | 
            +
                        units=[
         | 
| 555 | 
            +
                            self.joint_embed_shape,
         | 
| 556 | 
            +
                            self.joint_embed_shape,
         | 
| 557 | 
            +
                            self.joint_embed_shape,
         | 
| 558 | 
            +
                        ],
         | 
| 559 | 
            +
                        dropout=0.1,
         | 
| 560 | 
            +
                    )
         | 
| 561 | 
            +
             | 
| 562 | 
            +
                    # below here is text branch parameters
         | 
| 563 | 
            +
             | 
| 564 | 
            +
                    # ============================================================================================================
         | 
| 565 | 
            +
                    self.audio_projection = nn.Sequential(
         | 
| 566 | 
            +
                        nn.Linear(embed_dim, self.joint_embed_shape),
         | 
| 567 | 
            +
                        mlp_act_layer,
         | 
| 568 | 
            +
                        nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
         | 
| 569 | 
            +
                    )
         | 
| 570 | 
            +
             | 
| 571 | 
            +
                    self.logit_scale_a = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
         | 
| 572 | 
            +
                    self.logit_scale_t = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
         | 
| 573 | 
            +
                    self.register_buffer("attn_mask", self.build_attention_mask(), persistent=False)
         | 
| 574 | 
            +
             | 
| 575 | 
            +
                    self.init_text_branch_parameters()
         | 
| 576 | 
            +
             | 
| 577 | 
            +
                def init_text_branch_parameters(self):
         | 
| 578 | 
            +
                    if self.text_branch_type == "transformer":
         | 
| 579 | 
            +
                        nn.init.normal_(self.token_embedding.weight, std=0.02)
         | 
| 580 | 
            +
                        nn.init.normal_(self.positional_embedding, std=0.01)
         | 
| 581 | 
            +
                        proj_std = (self.text_branch.width**-0.5) * (
         | 
| 582 | 
            +
                            (2 * self.text_branch.layers) ** -0.5
         | 
| 583 | 
            +
                        )
         | 
| 584 | 
            +
                        attn_std = self.text_branch.width**-0.5
         | 
| 585 | 
            +
                        fc_std = (2 * self.text_branch.width) ** -0.5
         | 
| 586 | 
            +
                        for block in self.text_branch.resblocks:
         | 
| 587 | 
            +
                            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
         | 
| 588 | 
            +
                            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
         | 
| 589 | 
            +
                            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
         | 
| 590 | 
            +
                            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
         | 
| 591 | 
            +
                    if self.text_branch_type == "bert" or self.text_branch_type == "roberta":
         | 
| 592 | 
            +
                        width = self.text_branch.embeddings.word_embeddings.weight.shape[-1]
         | 
| 593 | 
            +
                    elif self.text_branch_type == "bart":
         | 
| 594 | 
            +
                        width = self.text_branch.shared.weight.shape[-1]
         | 
| 595 | 
            +
                    else:
         | 
| 596 | 
            +
                        width = self.text_branch.width
         | 
| 597 | 
            +
                    nn.init.constant_(self.logit_scale_a, np.log(1 / 0.07))
         | 
| 598 | 
            +
                    nn.init.constant_(self.logit_scale_t, np.log(1 / 0.07))
         | 
| 599 | 
            +
             | 
| 600 | 
            +
                    # deprecated
         | 
| 601 | 
            +
                    # if hasattr(self.visual, 'init_parameters'):
         | 
| 602 | 
            +
                    # self.visual.init_parameters()
         | 
| 603 | 
            +
             | 
| 604 | 
            +
                    # if self.text_projection is not None:
         | 
| 605 | 
            +
                    #     nn.init.normal_(self.text_projection, std=width**-0.5)
         | 
| 606 | 
            +
             | 
| 607 | 
            +
                def build_attention_mask(self):
         | 
| 608 | 
            +
                    # lazily create causal attention mask, with full attention between the vision tokens
         | 
| 609 | 
            +
                    # pytorch uses additive attention mask; fill with -inf
         | 
| 610 | 
            +
                    mask = torch.empty(self.context_length, self.context_length)
         | 
| 611 | 
            +
                    mask.fill_(float("-inf"))
         | 
| 612 | 
            +
                    mask.triu_(1)  # zero out the lower diagonal
         | 
| 613 | 
            +
                    return mask
         | 
| 614 | 
            +
             | 
| 615 | 
            +
                def encode_audio(self, audio, device):
         | 
| 616 | 
            +
                    return self.audio_branch(
         | 
| 617 | 
            +
                        audio, mixup_lambda=None, device=device
         | 
| 618 | 
            +
                    )  # mix lambda needs to add
         | 
| 619 | 
            +
             | 
| 620 | 
            +
                # def list_of_dict_of_tensor2dict_of_tensor(self, x, device):
         | 
| 621 | 
            +
                #     tmp = {}
         | 
| 622 | 
            +
                #     for k in x[0].keys():
         | 
| 623 | 
            +
                #         tmp[k] = []
         | 
| 624 | 
            +
                #         for i in range(len(x)):
         | 
| 625 | 
            +
                #             tmp[k].append(x[i][k][:77])
         | 
| 626 | 
            +
                #     for k in x[0].keys():
         | 
| 627 | 
            +
                #         tmp[k] = torch.tensor(tmp[k]).to(device=device, non_blocking=True)
         | 
| 628 | 
            +
                #     return tmp
         | 
| 629 | 
            +
             | 
| 630 | 
            +
                def encode_text(self, text, device):
         | 
| 631 | 
            +
                    if self.text_branch_type == "transformer":
         | 
| 632 | 
            +
                        text = text.to(device=device, non_blocking=True)
         | 
| 633 | 
            +
                        x = self.token_embedding(text)  # [batch_size, n_ctx, d_model]
         | 
| 634 | 
            +
             | 
| 635 | 
            +
                        x = x + self.positional_embedding
         | 
| 636 | 
            +
                        x = x.permute(1, 0, 2)  # NLD -> LND
         | 
| 637 | 
            +
                        x = self.text_branch(x, attn_mask=self.attn_mask)
         | 
| 638 | 
            +
                        x = x.permute(1, 0, 2)  # LND -> NLD
         | 
| 639 | 
            +
                        x = self.ln_final(x)
         | 
| 640 | 
            +
             | 
| 641 | 
            +
                        # x.shape = [batch_size, n_ctx, transformer.width]
         | 
| 642 | 
            +
                        # take features from the eot embedding (eot_token is the highest number in each sequence)
         | 
| 643 | 
            +
                        x = self.text_projection(x[torch.arange(x.shape[0]), text.argmax(dim=-1)])
         | 
| 644 | 
            +
                    elif self.text_branch_type == "bert":
         | 
| 645 | 
            +
                        # text = self.list_of_dict_of_tensor2dict_of_tensor(text, device)
         | 
| 646 | 
            +
                        # text = BatchEncoding(text)
         | 
| 647 | 
            +
                        x = self.text_branch(
         | 
| 648 | 
            +
                            input_ids=text["input_ids"].to(device=device, non_blocking=True),
         | 
| 649 | 
            +
                            attention_mask=text["attention_mask"].to(
         | 
| 650 | 
            +
                                device=device, non_blocking=True
         | 
| 651 | 
            +
                            ),
         | 
| 652 | 
            +
                            token_type_ids=text["token_type_ids"].to(
         | 
| 653 | 
            +
                                device=device, non_blocking=True
         | 
| 654 | 
            +
                            ),
         | 
| 655 | 
            +
                        )["pooler_output"]
         | 
| 656 | 
            +
                        x = self.text_projection(x)
         | 
| 657 | 
            +
                    elif self.text_branch_type == "roberta":
         | 
| 658 | 
            +
                        x = self.text_branch(
         | 
| 659 | 
            +
                            input_ids=text["input_ids"].to(device=device, non_blocking=True),
         | 
| 660 | 
            +
                            attention_mask=text["attention_mask"].to(
         | 
| 661 | 
            +
                                device=device, non_blocking=True
         | 
| 662 | 
            +
                            ),
         | 
| 663 | 
            +
                        )["pooler_output"]
         | 
| 664 | 
            +
                        x = self.text_projection(x)
         | 
| 665 | 
            +
                    elif self.text_branch_type == "bart":
         | 
| 666 | 
            +
                        x = torch.mean(
         | 
| 667 | 
            +
                            self.text_branch(
         | 
| 668 | 
            +
                                input_ids=text["input_ids"].to(device=device, non_blocking=True),
         | 
| 669 | 
            +
                                attention_mask=text["attention_mask"].to(
         | 
| 670 | 
            +
                                    device=device, non_blocking=True
         | 
| 671 | 
            +
                                ),
         | 
| 672 | 
            +
                            )["encoder_last_hidden_state"],
         | 
| 673 | 
            +
                            axis=1,
         | 
| 674 | 
            +
                        )
         | 
| 675 | 
            +
                        x = self.text_projection(x)
         | 
| 676 | 
            +
                    else:
         | 
| 677 | 
            +
                        logging.error(f"Model type {self.text_branch_type} not found")
         | 
| 678 | 
            +
                        raise RuntimeError(f"Model type {self.text_branch_type} not found.")
         | 
| 679 | 
            +
                    return x
         | 
| 680 | 
            +
             | 
| 681 | 
            +
                def forward(self, audio, text, device=None):
         | 
| 682 | 
            +
                    """Forward audio and text into the CLAP
         | 
| 683 | 
            +
             | 
| 684 | 
            +
                    Parameters
         | 
| 685 | 
            +
                    ----------
         | 
| 686 | 
            +
                    audio: torch.Tensor (batch_size, audio_length)
         | 
| 687 | 
            +
                        the time-domain audio input / the batch of mel_spec and longer list.
         | 
| 688 | 
            +
                    text: torch.Tensor () // need to add
         | 
| 689 | 
            +
                        the text token input
         | 
| 690 | 
            +
                    """
         | 
| 691 | 
            +
                    if device is None:
         | 
| 692 | 
            +
                        if audio is not None:
         | 
| 693 | 
            +
                            device = audio.device
         | 
| 694 | 
            +
                        elif text is not None:
         | 
| 695 | 
            +
                            device = text.device
         | 
| 696 | 
            +
                    if audio is None and text is None:
         | 
| 697 | 
            +
                        # a hack to get the logit scale
         | 
| 698 | 
            +
                        return self.logit_scale_a.exp(), self.logit_scale_t.exp()
         | 
| 699 | 
            +
                    elif audio is None:
         | 
| 700 | 
            +
                        return self.encode_text(text, device=device)
         | 
| 701 | 
            +
                    elif text is None:
         | 
| 702 | 
            +
                        return self.audio_projection(
         | 
| 703 | 
            +
                            self.encode_audio(audio, device=device)["embedding"]
         | 
| 704 | 
            +
                        )
         | 
| 705 | 
            +
                    audio_features = self.audio_projection(
         | 
| 706 | 
            +
                        self.encode_audio(audio, device=device)["embedding"]
         | 
| 707 | 
            +
                    )
         | 
| 708 | 
            +
                    audio_features = F.normalize(audio_features, dim=-1)
         | 
| 709 | 
            +
             | 
| 710 | 
            +
                    text_features = self.encode_text(text, device=device)
         | 
| 711 | 
            +
                    # print("text_features", text_features)
         | 
| 712 | 
            +
                    # print("text_features.shape", text_features.shape)
         | 
| 713 | 
            +
                    # print("text_features.type", type(text_features))
         | 
| 714 | 
            +
                    text_features = F.normalize(text_features, dim=-1)
         | 
| 715 | 
            +
             | 
| 716 | 
            +
                    audio_features_mlp = self.audio_transform(audio_features)
         | 
| 717 | 
            +
                    text_features_mlp = self.text_transform(text_features)
         | 
| 718 | 
            +
                    # Four outputs: audio features (basic & MLP), text features (basic & MLP)
         | 
| 719 | 
            +
                    return (
         | 
| 720 | 
            +
                        audio_features,
         | 
| 721 | 
            +
                        text_features,
         | 
| 722 | 
            +
                        audio_features_mlp,
         | 
| 723 | 
            +
                        text_features_mlp,
         | 
| 724 | 
            +
                        self.logit_scale_a.exp(),
         | 
| 725 | 
            +
                        self.logit_scale_t.exp(),
         | 
| 726 | 
            +
                    )
         | 
| 727 | 
            +
             | 
| 728 | 
            +
                def get_logit_scale(self):
         | 
| 729 | 
            +
                    return self.logit_scale_a.exp(), self.logit_scale_t.exp()
         | 
| 730 | 
            +
             | 
| 731 | 
            +
                def get_text_embedding(self, data):
         | 
| 732 | 
            +
                    """Get the text embedding from the model
         | 
| 733 | 
            +
             | 
| 734 | 
            +
                    Parameters
         | 
| 735 | 
            +
                    ----------
         | 
| 736 | 
            +
                    data: torch.Tensor
         | 
| 737 | 
            +
                        a tensor of text embedding
         | 
| 738 | 
            +
             | 
| 739 | 
            +
                    Returns
         | 
| 740 | 
            +
                    ----------
         | 
| 741 | 
            +
                    text_embed: torch.Tensor
         | 
| 742 | 
            +
                        a tensor of text_embeds (N, D)
         | 
| 743 | 
            +
             | 
| 744 | 
            +
                    """
         | 
| 745 | 
            +
                    device = next(self.parameters()).device
         | 
| 746 | 
            +
                    for k in data:
         | 
| 747 | 
            +
                        data[k] = data[k].to(device)
         | 
| 748 | 
            +
                        if len(data[k].size()) < 2:
         | 
| 749 | 
            +
                            data[k] = data[k].unsqueeze(0)
         | 
| 750 | 
            +
                    text_embeds = self.encode_text(data, device=device)
         | 
| 751 | 
            +
                    text_embeds = F.normalize(text_embeds, dim=-1)
         | 
| 752 | 
            +
             | 
| 753 | 
            +
                    return text_embeds
         | 
| 754 | 
            +
             | 
| 755 | 
            +
                def get_audio_embedding(self, data):
         | 
| 756 | 
            +
                    """Get the audio embedding from the model
         | 
| 757 | 
            +
             | 
| 758 | 
            +
                    Parameters
         | 
| 759 | 
            +
                    ----------
         | 
| 760 | 
            +
                    data: a list of dict
         | 
| 761 | 
            +
                        the audio input dict list from 'get_audio_feature' method
         | 
| 762 | 
            +
             | 
| 763 | 
            +
                    Returns
         | 
| 764 | 
            +
                    ----------
         | 
| 765 | 
            +
                    audio_embed: torch.Tensor
         | 
| 766 | 
            +
                        a tensor of audio_embeds (N, D)
         | 
| 767 | 
            +
             | 
| 768 | 
            +
                    """
         | 
| 769 | 
            +
                    device = next(self.parameters()).device
         | 
| 770 | 
            +
                    input_dict = {}
         | 
| 771 | 
            +
                    keys = data[0].keys()
         | 
| 772 | 
            +
                    for k in keys:
         | 
| 773 | 
            +
                        input_dict[k] = torch.cat([d[k].unsqueeze(0) for d in data], dim=0).to(
         | 
| 774 | 
            +
                            device
         | 
| 775 | 
            +
                        )
         | 
| 776 | 
            +
             | 
| 777 | 
            +
                    audio_embeds = self.audio_projection(
         | 
| 778 | 
            +
                        self.encode_audio(input_dict, device=device)["embedding"]
         | 
| 779 | 
            +
                    )
         | 
| 780 | 
            +
                    audio_embeds = F.normalize(audio_embeds, dim=-1)
         | 
| 781 | 
            +
             | 
| 782 | 
            +
                    return audio_embeds
         | 
| 783 | 
            +
             | 
| 784 | 
            +
                def audio_infer(self, audio, hopsize=None, device=None):
         | 
| 785 | 
            +
                    """Forward one audio and produce the audio embedding
         | 
| 786 | 
            +
             | 
| 787 | 
            +
                    Parameters
         | 
| 788 | 
            +
                    ----------
         | 
| 789 | 
            +
                    audio:  (audio_length)
         | 
| 790 | 
            +
                        the time-domain audio input, notice that it must be only one input
         | 
| 791 | 
            +
                    hopsize: int
         | 
| 792 | 
            +
                        the overlap hopsize as the sliding window
         | 
| 793 | 
            +
             | 
| 794 | 
            +
                    Returns
         | 
| 795 | 
            +
                    ----------
         | 
| 796 | 
            +
                    output_dict: {
         | 
| 797 | 
            +
                        key: [n, (embedding_shape)] if "HTS-AT"
         | 
| 798 | 
            +
                        or
         | 
| 799 | 
            +
                        key: [(embedding_shape)] if "PANN"
         | 
| 800 | 
            +
                    }
         | 
| 801 | 
            +
                        the list of key values of the audio branch
         | 
| 802 | 
            +
             | 
| 803 | 
            +
                    """
         | 
| 804 | 
            +
             | 
| 805 | 
            +
                    assert not self.training, "the inference mode must be run at eval stage"
         | 
| 806 | 
            +
                    output_dict = {}
         | 
| 807 | 
            +
                    # PANN
         | 
| 808 | 
            +
                    if self.audio_cfg.model_type == "PANN":
         | 
| 809 | 
            +
                        audio_input = audio.unsqueeze(dim=0)
         | 
| 810 | 
            +
                        output_dict[key] = self.encode_audio(audio_input, device=device)[
         | 
| 811 | 
            +
                            key
         | 
| 812 | 
            +
                        ].squeeze(dim=0)
         | 
| 813 | 
            +
                    elif self.audio_cfg.model_type == "HTSAT":
         | 
| 814 | 
            +
                        # repeat
         | 
| 815 | 
            +
                        audio_len = len(audio)
         | 
| 816 | 
            +
                        k = self.audio_cfg.clip_samples // audio_len
         | 
| 817 | 
            +
                        if k > 1:
         | 
| 818 | 
            +
                            audio = audio.repeat(k)
         | 
| 819 | 
            +
                            audio_len = len(audio)
         | 
| 820 | 
            +
             | 
| 821 | 
            +
                        if hopsize is None:
         | 
| 822 | 
            +
                            hopsize = min(hopsize, audio_len)
         | 
| 823 | 
            +
             | 
| 824 | 
            +
                        if audio_len > self.audio_cfg.clip_samples:
         | 
| 825 | 
            +
                            audio_input = [
         | 
| 826 | 
            +
                                audio[pos : pos + self.audio_cfg.clip_samples].clone()
         | 
| 827 | 
            +
                                for pos in range(
         | 
| 828 | 
            +
                                    0, audio_len - self.audio_cfg.clip_samples, hopsize
         | 
| 829 | 
            +
                                )
         | 
| 830 | 
            +
                            ]
         | 
| 831 | 
            +
                            audio_input.append(audio[-self.audio_cfg.clip_samples :].clone())
         | 
| 832 | 
            +
                            audio_input = torch.stack(audio_input)
         | 
| 833 | 
            +
                            output_dict[key] = self.encode_audio(audio_input, device=device)[key]
         | 
| 834 | 
            +
                        else:
         | 
| 835 | 
            +
                            audio_input = audio.unsqueeze(dim=0)
         | 
| 836 | 
            +
                            output_dict[key] = self.encode_audio(audio_input, device=device)[
         | 
| 837 | 
            +
                                key
         | 
| 838 | 
            +
                            ].squeeze(dim=0)
         | 
| 839 | 
            +
             | 
| 840 | 
            +
                    return output_dict
         | 
| 841 | 
            +
             | 
| 842 | 
            +
             | 
| 843 | 
            +
            def convert_weights_to_fp16(model: nn.Module):
         | 
| 844 | 
            +
                """Convert applicable model parameters to fp16"""
         | 
| 845 | 
            +
             | 
| 846 | 
            +
                def _convert_weights_to_fp16(l):
         | 
| 847 | 
            +
                    if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
         | 
| 848 | 
            +
                        l.weight.data = l.weight.data.half()
         | 
| 849 | 
            +
                        if l.bias is not None:
         | 
| 850 | 
            +
                            l.bias.data = l.bias.data.half()
         | 
| 851 | 
            +
             | 
| 852 | 
            +
                    if isinstance(l, nn.MultiheadAttention):
         | 
| 853 | 
            +
                        for attr in [
         | 
| 854 | 
            +
                            *[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]],
         | 
| 855 | 
            +
                            "in_proj_bias",
         | 
| 856 | 
            +
                            "bias_k",
         | 
| 857 | 
            +
                            "bias_v",
         | 
| 858 | 
            +
                        ]:
         | 
| 859 | 
            +
                            tensor = getattr(l, attr)
         | 
| 860 | 
            +
                            if tensor is not None:
         | 
| 861 | 
            +
                                tensor.data = tensor.data.half()
         | 
| 862 | 
            +
             | 
| 863 | 
            +
                    for name in ["text_projection", "proj"]:
         | 
| 864 | 
            +
                        if hasattr(l, name):
         | 
| 865 | 
            +
                            attr = getattr(l, name)
         | 
| 866 | 
            +
                            if attr is not None:
         | 
| 867 | 
            +
                                attr.data = attr.data.half()
         | 
| 868 | 
            +
             | 
| 869 | 
            +
                model.apply(_convert_weights_to_fp16)
         | 
| 870 | 
            +
             | 
| 871 | 
            +
             | 
| 872 | 
            +
            # Ignore the state dict of the vision part
         | 
| 873 | 
            +
            def build_model_from_openai_state_dict(
         | 
| 874 | 
            +
                state_dict: dict, model_cfg, enable_fusion: bool = False, fusion_type: str = "None"
         | 
| 875 | 
            +
            ):
         | 
| 876 | 
            +
             | 
| 877 | 
            +
                embed_dim = model_cfg["embed_dim"]
         | 
| 878 | 
            +
                audio_cfg = model_cfg["audio_cfg"]
         | 
| 879 | 
            +
                text_cfg = model_cfg["text_cfg"]
         | 
| 880 | 
            +
                context_length = state_dict["positional_embedding"].shape[0]
         | 
| 881 | 
            +
                vocab_size = state_dict["token_embedding.weight"].shape[0]
         | 
| 882 | 
            +
                transformer_width = state_dict["ln_final.weight"].shape[0]
         | 
| 883 | 
            +
                transformer_heads = transformer_width // 64
         | 
| 884 | 
            +
                transformer_layers = len(
         | 
| 885 | 
            +
                    set(
         | 
| 886 | 
            +
                        k.split(".")[2]
         | 
| 887 | 
            +
                        for k in state_dict
         | 
| 888 | 
            +
                        if k.startswith(f"transformer.resblocks")
         | 
| 889 | 
            +
                    )
         | 
| 890 | 
            +
                )
         | 
| 891 | 
            +
             | 
| 892 | 
            +
                audio_cfg = CLAPAudioCfp(**audio_cfg)
         | 
| 893 | 
            +
                text_cfg = CLAPTextCfg(**text_cfg)
         | 
| 894 | 
            +
             | 
| 895 | 
            +
                model = CLAP(
         | 
| 896 | 
            +
                    embed_dim,
         | 
| 897 | 
            +
                    audio_cfg=audio_cfg,
         | 
| 898 | 
            +
                    text_cfg=text_cfg,
         | 
| 899 | 
            +
                    quick_gelu=True,  # OpenAI models were trained with QuickGELU
         | 
| 900 | 
            +
                    enable_fusion=enable_fusion,
         | 
| 901 | 
            +
                    fusion_type=fusion_type,
         | 
| 902 | 
            +
                )
         | 
| 903 | 
            +
                state_dict["logit_scale_a"] = state_dict["logit_scale"]
         | 
| 904 | 
            +
                state_dict["logit_scale_t"] = state_dict["logit_scale"]
         | 
| 905 | 
            +
                pop_keys = list(state_dict.keys())[::]
         | 
| 906 | 
            +
                # pop the visual branch saved weights
         | 
| 907 | 
            +
                for key in pop_keys:
         | 
| 908 | 
            +
                    if key.startswith("visual."):
         | 
| 909 | 
            +
                        state_dict.pop(key, None)
         | 
| 910 | 
            +
             | 
| 911 | 
            +
                for key in ["logit_scale", "input_resolution", "context_length", "vocab_size"]:
         | 
| 912 | 
            +
                    state_dict.pop(key, None)
         | 
| 913 | 
            +
             | 
| 914 | 
            +
                # not use fp16
         | 
| 915 | 
            +
                # convert_weights_to_fp16(model)
         | 
| 916 | 
            +
                model.load_state_dict(state_dict, strict=False)
         | 
| 917 | 
            +
                return model.eval()
         | 
| 918 | 
            +
             | 
| 919 | 
            +
             | 
| 920 | 
            +
            def trace_model(model, batch_size=256, device=torch.device("cpu")):
         | 
| 921 | 
            +
                model.eval()
         | 
| 922 | 
            +
                audio_length = model.audio_cfg.audio_length
         | 
| 923 | 
            +
                example_audio = torch.ones((batch_size, audio_length), device=device)
         | 
| 924 | 
            +
                example_text = torch.zeros(
         | 
| 925 | 
            +
                    (batch_size, model.context_length), dtype=torch.int, device=device
         | 
| 926 | 
            +
                )
         | 
| 927 | 
            +
                model = torch.jit.trace_module(
         | 
| 928 | 
            +
                    model,
         | 
| 929 | 
            +
                    inputs=dict(
         | 
| 930 | 
            +
                        forward=(example_audio, example_text),
         | 
| 931 | 
            +
                        encode_text=(example_text,),
         | 
| 932 | 
            +
                        encode_image=(example_audio,),
         | 
| 933 | 
            +
                    ),
         | 
| 934 | 
            +
                )
         | 
| 935 | 
            +
                model.audio_cfg.audio_length = audio_length  # Question: what does this do?
         | 
| 936 | 
            +
                return model
         | 
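A minimal usage sketch for the class above, not part of the diff itself: it assumes the repository layout shown here (audioldm/clap/open_clip) is importable and that the HTSAT-base config added below is on disk. It illustrates two things visible in the code: the constructor accepts plain dicts for audio_cfg/text_cfg (they are converted to CLAPAudioCfp / CLAPTextCfg internally), and calling forward() with audio=None returns only the projected text embedding.

import json
import torch

from audioldm.clap.open_clip.model import CLAP  # import path assumed from this repo layout

# HTSAT-base.json is one of the model configs added below in this commit.
with open("audioldm/clap/open_clip/model_configs/HTSAT-base.json") as f:
    cfg = json.load(f)

model = CLAP(
    embed_dim=cfg["embed_dim"],
    audio_cfg=cfg["audio_cfg"],  # plain dicts are converted to CLAPAudioCfp / CLAPTextCfg
    text_cfg=cfg["text_cfg"],
    quick_gelu=False,
    enable_fusion=False,
).eval()

# With audio=None, forward() returns just the text embedding, already projected
# to joint_embed_shape (512 by default); with text=None it returns only the
# projected audio embedding.
tokens = torch.zeros(2, model.context_length, dtype=torch.long)  # dummy token ids
with torch.no_grad():
    text_embed = model(None, tokens)
print(text_embed.shape)  # torch.Size([2, 512])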
    	
        audioldm/clap/open_clip/model_configs/HTSAT-base.json
    ADDED
    
{
    "embed_dim": 1024,
    "audio_cfg": {
        "audio_length": 1024,
        "clip_samples": 480000,
        "mel_bins": 64,
        "sample_rate": 48000,
        "window_size": 1024,
        "hop_size": 480,
        "fmin": 50,
        "fmax": 14000,
        "class_num": 527,
        "model_type": "HTSAT",
        "model_name": "base"
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
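A quick arithmetic check of the audio settings in this config (editorial note, not part of the diff): clip_samples / sample_rate gives the clip duration and clip_samples / hop_size the approximate number of STFT/mel frames per clip.

# Derived quantities for HTSAT-base (values copied from the JSON above):
sample_rate = 48000
clip_samples = 480000
hop_size = 480
print(clip_samples / sample_rate)  # 10.0 -> each training clip covers 10 seconds of audio
print(clip_samples // hop_size)    # 1000 -> roughly 1000 frames per clip (before padding)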
    	
        audioldm/clap/open_clip/model_configs/HTSAT-large.json
    ADDED
    
{
    "embed_dim": 2048,
    "audio_cfg": {
        "audio_length": 1024,
        "clip_samples": 480000,
        "mel_bins": 64,
        "sample_rate": 48000,
        "window_size": 1024,
        "hop_size": 480,
        "fmin": 50,
        "fmax": 14000,
        "class_num": 527,
        "model_type": "HTSAT",
        "model_name": "large"
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/HTSAT-tiny-win-1536.json
    ADDED
    
{
    "embed_dim": 768,
    "audio_cfg": {
        "audio_length": 1024,
        "clip_samples": 480000,
        "mel_bins": 64,
        "sample_rate": 48000,
        "window_size": 1536,
        "hop_size": 480,
        "fmin": 50,
        "fmax": 14000,
        "class_num": 527,
        "model_type": "HTSAT",
        "model_name": "tiny"
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/HTSAT-tiny.json
    ADDED
    
{
    "embed_dim": 768,
    "audio_cfg": {
        "audio_length": 1024,
        "clip_samples": 480000,
        "mel_bins": 64,
        "sample_rate": 48000,
        "window_size": 1024,
        "hop_size": 480,
        "fmin": 50,
        "fmax": 14000,
        "class_num": 527,
        "model_type": "HTSAT",
        "model_name": "tiny"
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/PANN-10.json
    ADDED
    
{
    "embed_dim": 1024,
    "audio_cfg": {
        "audio_length": 1024,
        "clip_samples": 480000,
        "mel_bins": 64,
        "sample_rate": 48000,
        "window_size": 1024,
        "hop_size": 480,
        "fmin": 50,
        "fmax": 14000,
        "class_num": 527,
        "model_type": "PANN",
        "model_name": "Cnn10"
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/PANN-14-fmax-18k.json
    ADDED
    
{
    "embed_dim": 2048,
    "audio_cfg": {
        "audio_length": 1024,
        "clip_samples": 480000,
        "mel_bins": 64,
        "sample_rate": 48000,
        "window_size": 1024,
        "hop_size": 480,
        "fmin": 50,
        "fmax": 18000,
        "class_num": 527,
        "model_type": "PANN",
        "model_name": "Cnn14"
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/PANN-14-fmax-8k-20s.json
    ADDED
    
{
    "embed_dim": 2048,
    "audio_cfg": {
        "audio_length": 1024,
        "clip_samples": 960000,
        "mel_bins": 64,
        "sample_rate": 48000,
        "window_size": 1024,
        "hop_size": 360,
        "fmin": 50,
        "fmax": 8000,
        "class_num": 527,
        "model_type": "PANN",
        "model_name": "Cnn14"
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/PANN-14-tiny-transformer.json
    ADDED
    
{
    "embed_dim": 2048,
    "audio_cfg": {
        "audio_length": 1024,
        "clip_samples": 480000,
        "mel_bins": 64,
        "sample_rate": 48000,
        "window_size": 1024,
        "hop_size": 480,
        "fmin": 50,
        "fmax": 14000,
        "class_num": 527,
        "model_type": "PANN",
        "model_name": "Cnn14"
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 4
    }
}
    	
        audioldm/clap/open_clip/model_configs/PANN-14-win-1536.json
    ADDED
    
{
    "embed_dim": 2048,
    "audio_cfg": {
        "audio_length": 1024,
        "clip_samples": 480000,
        "mel_bins": 64,
        "sample_rate": 48000,
        "window_size": 1536,
        "hop_size": 480,
        "fmin": 50,
        "fmax": 14000,
        "class_num": 527,
        "model_type": "PANN",
        "model_name": "Cnn14"
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/PANN-14.json
    ADDED
    
{
    "embed_dim": 2048,
    "audio_cfg": {
        "audio_length": 1024,
        "clip_samples": 480000,
        "mel_bins": 64,
        "sample_rate": 48000,
        "window_size": 1024,
        "hop_size": 480,
        "fmin": 50,
        "fmax": 14000,
        "class_num": 527,
        "model_type": "PANN",
        "model_name": "Cnn14"
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/PANN-6.json
    ADDED
    
@@ -0,0 +1,23 @@
{
    "embed_dim": 512,
    "audio_cfg": {
        "audio_length": 1024,
        "clip_samples": 480000,
        "mel_bins": 64,
        "sample_rate": 48000,
        "window_size": 1024,
        "hop_size": 480,
        "fmin": 50,
        "fmax": 14000,
        "class_num": 527,
        "model_type": "PANN",
        "model_name": "Cnn6"
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/RN101-quickgelu.json
    ADDED
    
@@ -0,0 +1,22 @@
{
    "embed_dim": 512,
    "quick_gelu": true,
    "vision_cfg": {
        "image_size": 224,
        "layers": [
            3,
            4,
            23,
            3
        ],
        "width": 64,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/RN101.json
    ADDED
    
@@ -0,0 +1,21 @@
{
    "embed_dim": 512,
    "vision_cfg": {
        "image_size": 224,
        "layers": [
            3,
            4,
            23,
            3
        ],
        "width": 64,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/RN50-quickgelu.json
    ADDED
    
@@ -0,0 +1,22 @@
{
    "embed_dim": 1024,
    "quick_gelu": true,
    "vision_cfg": {
        "image_size": 224,
        "layers": [
            3,
            4,
            6,
            3
        ],
        "width": 64,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/RN50.json
    ADDED
    
@@ -0,0 +1,21 @@
{
    "embed_dim": 1024,
    "vision_cfg": {
        "image_size": 224,
        "layers": [
            3,
            4,
            6,
            3
        ],
        "width": 64,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/RN50x16.json
    ADDED
    
@@ -0,0 +1,21 @@
{
    "embed_dim": 768,
    "vision_cfg": {
        "image_size": 384,
        "layers": [
            6,
            8,
            18,
            8
        ],
        "width": 96,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 768,
        "heads": 12,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/RN50x4.json
    ADDED
    
@@ -0,0 +1,21 @@
{
    "embed_dim": 640,
    "vision_cfg": {
        "image_size": 288,
        "layers": [
            4,
            6,
            10,
            6
        ],
        "width": 80,
        "patch_size": null
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 640,
        "heads": 10,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/ViT-B-16.json
    ADDED
    
@@ -0,0 +1,16 @@
{
    "embed_dim": 512,
    "vision_cfg": {
        "image_size": 224,
        "layers": 12,
        "width": 768,
        "patch_size": 16
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/ViT-B-32-quickgelu.json
    ADDED
    
@@ -0,0 +1,17 @@
{
    "embed_dim": 512,
    "quick_gelu": true,
    "vision_cfg": {
        "image_size": 224,
        "layers": 12,
        "width": 768,
        "patch_size": 32
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/ViT-B-32.json
    ADDED
    
@@ -0,0 +1,16 @@
{
    "embed_dim": 512,
    "vision_cfg": {
        "image_size": 224,
        "layers": 12,
        "width": 768,
        "patch_size": 32
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
    	
        audioldm/clap/open_clip/model_configs/ViT-L-14.json
    ADDED
    
@@ -0,0 +1,16 @@
{
    "embed_dim": 768,
    "vision_cfg": {
        "image_size": 224,
        "layers": 24,
        "width": 1024,
        "patch_size": 14
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 768,
        "heads": 12,
        "layers": 12
    }
}
    	
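The configs above mirror the upstream open_clip layout: the PANN/HTSAT files carry an audio_cfg block where the CLIP ResNet/ViT files carry vision_cfg, and every file shares the same text_cfg shape. A rough sketch of how such a file can be looked up by model name, assuming only the model_configs directory shown above (the loader below is illustrative; the repository's own config loading presumably lives in factory.py from this upload):

import json
from pathlib import Path

# Illustrative lookup of a config by name from the directory listed above.
CONFIG_DIR = Path("audioldm/clap/open_clip/model_configs")

def get_config(model_name: str) -> dict:
    with open(CONFIG_DIR / f"{model_name}.json") as f:
        return json.load(f)

cfg = get_config("PANN-14")
embed_dim = cfg["embed_dim"]         # joint embedding width, e.g. 2048 for PANN-14
audio_cfg = cfg.get("audio_cfg")     # present for the PANN/HTSAT configs
vision_cfg = cfg.get("vision_cfg")   # present for the RN*/ViT-* configs
text_cfg = cfg["text_cfg"]           # present in every config above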
        audioldm/clap/open_clip/openai.py
    ADDED
    
@@ -0,0 +1,159 @@
""" OpenAI pretrained model functions

Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""

import os
import warnings
from typing import Union, List

import torch

from .model import build_model_from_openai_state_dict
from .pretrained import (
    get_pretrained_url,
    list_pretrained_tag_models,
    download_pretrained,
)

__all__ = ["list_openai_models", "load_openai_model"]

CACHE_DIR = os.getenv("AUDIOLDM_CACHE_DIR", "~/.cache")



def list_openai_models() -> List[str]:
    """Returns the names of available CLIP models"""
    return list_pretrained_tag_models("openai")


def load_openai_model(
    name: str,
    model_cfg,
    device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
    jit=True,
    cache_dir=os.path.expanduser(f"{CACHE_DIR}/clip"),
    enable_fusion: bool = False,
    fusion_type: str = "None",
):
    """Load a CLIP model, preserve its text pretrained part, and set in the CLAP model

    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    jit : bool
        Whether to load the optimized JIT model (default) or more hackable non-JIT model.

    Returns
    -------
    model : torch.nn.Module
        The CLAP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    if get_pretrained_url(name, "openai"):
        model_path = download_pretrained(
            get_pretrained_url(name, "openai"), root=cache_dir
        )
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(
            f"Model {name} not found; available models = {list_openai_models()}"
        )

    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(
                f"File {model_path} is not a JIT archive. Loading as a state dict instead"
            )
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")

    if not jit:
        try:
            model = build_model_from_openai_state_dict(
                state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type
            ).to(device)
        except KeyError:
            sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
            model = build_model_from_openai_state_dict(
                sd, model_cfg, enable_fusion, fusion_type
            ).to(device)

        if str(device) == "cpu":
            model.float()
        return model

    # patch the device names
    device_holder = torch.jit.trace(
        lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]
    )
    device_node = [
        n
        for n in device_holder.graph.findAllNodes("prim::Constant")
        if "Device" in repr(n)
    ][-1]

    def patch_device(module):
        try:
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            graphs = []

        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)

        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith(
                    "cuda"
                ):
                    node.copyAttributes(device_node)

    model.apply(patch_device)
    patch_device(model.encode_audio)
    patch_device(model.encode_text)

    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        float_holder = torch.jit.trace(
            lambda: torch.ones([]).float(), example_inputs=[]
        )
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []

            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)

            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [
                        1,
                        2,
                    ]:  # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_audio)
        patch_float(model.encode_text)
        model.float()

    model.audio_branch.audio_length = model.audio_cfg.audio_length
    return model
    	
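For orientation, a hedged usage sketch of load_openai_model as defined above; the checkpoint name, config file, and import path are placeholders under the package layout shown in this upload, not a prescribed entry point:

import json

from audioldm.clap.open_clip.openai import load_openai_model

# model_cfg is just a parsed config from the model_configs directory above.
with open("audioldm/clap/open_clip/model_configs/PANN-14.json") as f:
    model_cfg = json.load(f)

model = load_openai_model(
    "ViT-B-16",   # any name known to get_pretrained_url(..., "openai"), or a local checkpoint path
    model_cfg,
    device="cpu",
    jit=False,    # take the plain state-dict path instead of the TorchScript branch
)

With jit=False the function returns right after build_model_from_openai_state_dict and the .to(device) call; the prim::Constant device patching and the float32 patching at the bottom of the file are only exercised when a JIT archive is loaded.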
        audioldm/clap/open_clip/pann_model.py
    ADDED
    
@@ -0,0 +1,704 @@
# PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition
# Reference from https://github.com/qiuqiangkong/audioset_tagging_cnn
# Some layers are re-designed for CLAP
import os

os.environ["NUMBA_CACHE_DIR"] = "/tmp/"

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation

from .utils import do_mixup, interpolate, pad_framewise_output
from .feature_fusion import iAFF, AFF, DAF


def init_layer(layer):
    """Initialize a Linear or Convolutional layer."""
    nn.init.xavier_uniform_(layer.weight)

    if hasattr(layer, "bias"):
        if layer.bias is not None:
            layer.bias.data.fill_(0.0)


def init_bn(bn):
    """Initialize a Batchnorm layer."""
    bn.bias.data.fill_(0.0)
    bn.weight.data.fill_(1.0)


class ConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels):

        super(ConvBlock, self).__init__()

        self.conv1 = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=(3, 3),
            stride=(1, 1),
            padding=(1, 1),
            bias=False,
        )

        self.conv2 = nn.Conv2d(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=(3, 3),
            stride=(1, 1),
            padding=(1, 1),
            bias=False,
        )

        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

        self.init_weight()

    def init_weight(self):
        init_layer(self.conv1)
        init_layer(self.conv2)
        init_bn(self.bn1)
        init_bn(self.bn2)

    def forward(self, input, pool_size=(2, 2), pool_type="avg"):

        x = input
        x = F.relu_(self.bn1(self.conv1(x)))
        x = F.relu_(self.bn2(self.conv2(x)))
        if pool_type == "max":
            x = F.max_pool2d(x, kernel_size=pool_size)
        elif pool_type == "avg":
            x = F.avg_pool2d(x, kernel_size=pool_size)
        elif pool_type == "avg+max":
            x1 = F.avg_pool2d(x, kernel_size=pool_size)
            x2 = F.max_pool2d(x, kernel_size=pool_size)
            x = x1 + x2
        else:
            raise Exception("Incorrect argument!")

        return x


class ConvBlock5x5(nn.Module):
    def __init__(self, in_channels, out_channels):

        super(ConvBlock5x5, self).__init__()

        self.conv1 = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=(5, 5),
            stride=(1, 1),
            padding=(2, 2),
            bias=False,
        )

        self.bn1 = nn.BatchNorm2d(out_channels)

        self.init_weight()

    def init_weight(self):
        init_layer(self.conv1)
        init_bn(self.bn1)

    def forward(self, input, pool_size=(2, 2), pool_type="avg"):

        x = input
        x = F.relu_(self.bn1(self.conv1(x)))
        if pool_type == "max":
            x = F.max_pool2d(x, kernel_size=pool_size)
        elif pool_type == "avg":
            x = F.avg_pool2d(x, kernel_size=pool_size)
        elif pool_type == "avg+max":
            x1 = F.avg_pool2d(x, kernel_size=pool_size)
            x2 = F.max_pool2d(x, kernel_size=pool_size)
            x = x1 + x2
        else:
            raise Exception("Incorrect argument!")

        return x


class AttBlock(nn.Module):
    def __init__(self, n_in, n_out, activation="linear", temperature=1.0):
        super(AttBlock, self).__init__()

        self.activation = activation
        self.temperature = temperature
        self.att = nn.Conv1d(
            in_channels=n_in,
            out_channels=n_out,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True,
        )
        self.cla = nn.Conv1d(
            in_channels=n_in,
            out_channels=n_out,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True,
        )

        self.bn_att = nn.BatchNorm1d(n_out)
        self.init_weights()

    def init_weights(self):
        init_layer(self.att)
        init_layer(self.cla)
        init_bn(self.bn_att)

    def forward(self, x):
        # x: (n_samples, n_in, n_time)
        norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1)
        cla = self.nonlinear_transform(self.cla(x))
        x = torch.sum(norm_att * cla, dim=2)
        return x, norm_att, cla

    def nonlinear_transform(self, x):
        if self.activation == "linear":
            return x
        elif self.activation == "sigmoid":
            return torch.sigmoid(x)


class Cnn14(nn.Module):
    def __init__(
        self,
        sample_rate,
        window_size,
        hop_size,
        mel_bins,
        fmin,
        fmax,
        classes_num,
        enable_fusion=False,
        fusion_type="None",
    ):

        super(Cnn14, self).__init__()

        window = "hann"
        center = True
        pad_mode = "reflect"
        ref = 1.0
        amin = 1e-10
        top_db = None

        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type

        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(
            n_fft=window_size,
            hop_length=hop_size,
            win_length=window_size,
            window=window,
            center=center,
            pad_mode=pad_mode,
            freeze_parameters=True,
        )

        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(
            sr=sample_rate,
            n_fft=window_size,
            n_mels=mel_bins,
            fmin=fmin,
            fmax=fmax,
            ref=ref,
            amin=amin,
            top_db=top_db,
            freeze_parameters=True,
        )

        # Spec augmenter
        self.spec_augmenter = SpecAugmentation(
            time_drop_width=64,
            time_stripes_num=2,
            freq_drop_width=8,
            freq_stripes_num=2,
        )

        self.bn0 = nn.BatchNorm2d(64)

        if (self.enable_fusion) and (self.fusion_type == "channel_map"):
            self.conv_block1 = ConvBlock(in_channels=4, out_channels=64)
        else:
            self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
        self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)

        self.fc1 = nn.Linear(2048, 2048, bias=True)
        self.fc_audioset = nn.Linear(2048, classes_num, bias=True)

        if (self.enable_fusion) and (
            self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]
        ):
            self.mel_conv1d = nn.Sequential(
                nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
                nn.BatchNorm1d(64),  # No Relu
            )
            if self.fusion_type == "daf_1d":
                self.fusion_model = DAF()
            elif self.fusion_type == "aff_1d":
                self.fusion_model = AFF(channels=64, type="1D")
            elif self.fusion_type == "iaff_1d":
                self.fusion_model = iAFF(channels=64, type="1D")

        if (self.enable_fusion) and (
            self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
        ):
            self.mel_conv2d = nn.Sequential(
                nn.Conv2d(1, 64, kernel_size=(5, 5), stride=(6, 2), padding=(2, 2)),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
            )

            if self.fusion_type == "daf_2d":
                self.fusion_model = DAF()
            elif self.fusion_type == "aff_2d":
                self.fusion_model = AFF(channels=64, type="2D")
            elif self.fusion_type == "iaff_2d":
                self.fusion_model = iAFF(channels=64, type="2D")
        self.init_weight()

    def init_weight(self):
        init_bn(self.bn0)
        init_layer(self.fc1)
        init_layer(self.fc_audioset)

    def forward(self, input, mixup_lambda=None, device=None):
        """
        Input: (batch_size, data_length)"""

        if self.enable_fusion and input["longer"].sum() == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            input["longer"][torch.randint(0, input["longer"].shape[0], (1,))] = True

        if not self.enable_fusion:
            x = self.spectrogram_extractor(
                input["waveform"].to(device=device, non_blocking=True)
            )  # (batch_size, 1, time_steps, freq_bins)
            x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)

            x = x.transpose(1, 3)
            x = self.bn0(x)
            x = x.transpose(1, 3)
        else:
            longer_list = input["longer"].to(device=device, non_blocking=True)
            x = input["mel_fusion"].to(device=device, non_blocking=True)
            longer_list_idx = torch.where(longer_list)[0]
            x = x.transpose(1, 3)
            x = self.bn0(x)
            x = x.transpose(1, 3)
            if self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]:
                new_x = x[:, 0:1, :, :].clone().contiguous()
                # local processing
                if len(longer_list_idx) > 0:
                    fusion_x_local = x[longer_list_idx, 1:, :, :].clone().contiguous()
                    FB, FC, FT, FF = fusion_x_local.size()
                    fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
                    fusion_x_local = torch.permute(
                        fusion_x_local, (0, 2, 1)
                    ).contiguous()
                    fusion_x_local = self.mel_conv1d(fusion_x_local)
                    fusion_x_local = fusion_x_local.view(
                        FB, FC, FF, fusion_x_local.size(-1)
                    )
                    fusion_x_local = (
                        torch.permute(fusion_x_local, (0, 2, 1, 3))
                        .contiguous()
                        .flatten(2)
         | 
| 322 | 
            +
                                )
         | 
| 323 | 
            +
                                if fusion_x_local.size(-1) < FT:
         | 
| 324 | 
            +
                                    fusion_x_local = torch.cat(
         | 
| 325 | 
            +
                                        [
         | 
| 326 | 
            +
                                            fusion_x_local,
         | 
| 327 | 
            +
                                            torch.zeros(
         | 
| 328 | 
            +
                                                (FB, FF, FT - fusion_x_local.size(-1)),
         | 
| 329 | 
            +
                                                device=device,
         | 
| 330 | 
            +
                                            ),
         | 
| 331 | 
            +
                                        ],
         | 
| 332 | 
            +
                                        dim=-1,
         | 
| 333 | 
            +
                                    )
         | 
| 334 | 
            +
                                else:
         | 
| 335 | 
            +
                                    fusion_x_local = fusion_x_local[:, :, :FT]
         | 
| 336 | 
            +
                                # 1D fusion
         | 
| 337 | 
            +
                                new_x = new_x.squeeze(1).permute((0, 2, 1)).contiguous()
         | 
| 338 | 
            +
                                new_x[longer_list_idx] = self.fusion_model(
         | 
| 339 | 
            +
                                    new_x[longer_list_idx], fusion_x_local
         | 
| 340 | 
            +
                                )
         | 
| 341 | 
            +
                                x = new_x.permute((0, 2, 1)).contiguous()[:, None, :, :]
         | 
| 342 | 
            +
                            else:
         | 
| 343 | 
            +
                                x = new_x
         | 
| 344 | 
            +
                        elif self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d", "channel_map"]:
         | 
| 345 | 
            +
                            x = x  # no change
         | 
| 346 | 
            +
             | 
| 347 | 
            +
                    if self.training:
         | 
| 348 | 
            +
                        x = self.spec_augmenter(x)
         | 
| 349 | 
            +
                    # Mixup on spectrogram
         | 
| 350 | 
            +
                    if self.training and mixup_lambda is not None:
         | 
| 351 | 
            +
                        x = do_mixup(x, mixup_lambda)
         | 
| 352 | 
            +
                    if (self.enable_fusion) and (
         | 
| 353 | 
            +
                        self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
         | 
| 354 | 
            +
                    ):
         | 
| 355 | 
            +
                        global_x = x[:, 0:1, :, :]
         | 
| 356 | 
            +
             | 
| 357 | 
            +
                        # global processing
         | 
| 358 | 
            +
                        B, C, H, W = global_x.shape
         | 
| 359 | 
            +
                        global_x = self.conv_block1(global_x, pool_size=(2, 2), pool_type="avg")
         | 
| 360 | 
            +
                        if len(longer_list_idx) > 0:
         | 
| 361 | 
            +
                            local_x = x[longer_list_idx, 1:, :, :].contiguous()
         | 
| 362 | 
            +
                            TH = global_x.size(-2)
         | 
| 363 | 
            +
                            # local processing
         | 
| 364 | 
            +
                            B, C, H, W = local_x.shape
         | 
| 365 | 
            +
                            local_x = local_x.view(B * C, 1, H, W)
         | 
| 366 | 
            +
                            local_x = self.mel_conv2d(local_x)
         | 
| 367 | 
            +
                            local_x = local_x.view(
         | 
| 368 | 
            +
                                B, C, local_x.size(1), local_x.size(2), local_x.size(3)
         | 
| 369 | 
            +
                            )
         | 
| 370 | 
            +
                            local_x = local_x.permute((0, 2, 1, 3, 4)).contiguous().flatten(2, 3)
         | 
| 371 | 
            +
                            TB, TC, _, TW = local_x.size()
         | 
| 372 | 
            +
                            if local_x.size(-2) < TH:
         | 
| 373 | 
            +
                                local_x = torch.cat(
         | 
| 374 | 
            +
                                    [
         | 
| 375 | 
            +
                                        local_x,
         | 
| 376 | 
            +
                                        torch.zeros(
         | 
| 377 | 
            +
                                            (TB, TC, TH - local_x.size(-2), TW),
         | 
| 378 | 
            +
                                            device=global_x.device,
         | 
| 379 | 
            +
                                        ),
         | 
| 380 | 
            +
                                    ],
         | 
| 381 | 
            +
                                    dim=-2,
         | 
| 382 | 
            +
                                )
         | 
| 383 | 
            +
                            else:
         | 
| 384 | 
            +
                                local_x = local_x[:, :, :TH, :]
         | 
| 385 | 
            +
             | 
| 386 | 
            +
                            global_x[longer_list_idx] = self.fusion_model(
         | 
| 387 | 
            +
                                global_x[longer_list_idx], local_x
         | 
| 388 | 
            +
                            )
         | 
| 389 | 
            +
                        x = global_x
         | 
| 390 | 
            +
                    else:
         | 
| 391 | 
            +
                        x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg")
         | 
| 392 | 
            +
             | 
| 393 | 
            +
                    x = F.dropout(x, p=0.2, training=self.training)
         | 
| 394 | 
            +
                    x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg")
         | 
| 395 | 
            +
                    x = F.dropout(x, p=0.2, training=self.training)
         | 
| 396 | 
            +
                    x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg")
         | 
| 397 | 
            +
                    x = F.dropout(x, p=0.2, training=self.training)
         | 
| 398 | 
            +
                    x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
         | 
| 399 | 
            +
                    x = F.dropout(x, p=0.2, training=self.training)
         | 
| 400 | 
            +
                    x = self.conv_block5(x, pool_size=(2, 2), pool_type="avg")
         | 
| 401 | 
            +
                    x = F.dropout(x, p=0.2, training=self.training)
         | 
| 402 | 
            +
                    x = self.conv_block6(x, pool_size=(1, 1), pool_type="avg")
         | 
| 403 | 
            +
                    x = F.dropout(x, p=0.2, training=self.training)
         | 
| 404 | 
            +
                    x = torch.mean(x, dim=3)
         | 
| 405 | 
            +
             | 
| 406 | 
            +
                    latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
         | 
| 407 | 
            +
                    latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
         | 
| 408 | 
            +
                    latent_x = latent_x1 + latent_x2
         | 
| 409 | 
            +
                    latent_x = latent_x.transpose(1, 2)
         | 
| 410 | 
            +
                    latent_x = F.relu_(self.fc1(latent_x))
         | 
| 411 | 
            +
                    latent_output = interpolate(latent_x, 32)
         | 
| 412 | 
            +
             | 
| 413 | 
            +
                    (x1, _) = torch.max(x, dim=2)
         | 
| 414 | 
            +
                    x2 = torch.mean(x, dim=2)
         | 
| 415 | 
            +
                    x = x1 + x2
         | 
| 416 | 
            +
                    x = F.dropout(x, p=0.5, training=self.training)
         | 
| 417 | 
            +
                    x = F.relu_(self.fc1(x))
         | 
| 418 | 
            +
                    embedding = F.dropout(x, p=0.5, training=self.training)
         | 
| 419 | 
            +
                    clipwise_output = torch.sigmoid(self.fc_audioset(x))
         | 
| 420 | 
            +
             | 
| 421 | 
            +
                    output_dict = {
         | 
| 422 | 
            +
                        "clipwise_output": clipwise_output,
         | 
| 423 | 
            +
                        "embedding": embedding,
         | 
| 424 | 
            +
                        "fine_grained_embedding": latent_output,
         | 
| 425 | 
            +
                    }
         | 
| 426 | 
            +
                    return output_dict
         | 
| 427 | 
            +
             | 
| 428 | 
            +
             | 
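# Interface note for the fusion-capable backbone above (Cnn14 in the upstream
# PANN/CLAP naming): with `enable_fusion=False` the forward pass reads
# `input["waveform"]` of shape (batch_size, data_length); with fusion enabled it
# instead consumes the precomputed `input["mel_fusion"]` stack together with the
# boolean `input["longer"]` mask marking which clips exceed 10 s (if none do,
# one entry is flipped to True at random so the fusion path always receives a
# "long" sample). All branches return the same dict: "clipwise_output"
# (sigmoid class scores), "embedding" (clip-level feature), and
# "fine_grained_embedding" (frame-level feature upsampled via `interpolate`).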

class Cnn6(nn.Module):
    def __init__(
        self,
        sample_rate,
        window_size,
        hop_size,
        mel_bins,
        fmin,
        fmax,
        classes_num,
        enable_fusion=False,
        fusion_type="None",
    ):

        super(Cnn6, self).__init__()

        window = "hann"
        center = True
        pad_mode = "reflect"
        ref = 1.0
        amin = 1e-10
        top_db = None

        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type

        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(
            n_fft=window_size,
            hop_length=hop_size,
            win_length=window_size,
            window=window,
            center=center,
            pad_mode=pad_mode,
            freeze_parameters=True,
        )

        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(
            sr=sample_rate,
            n_fft=window_size,
            n_mels=mel_bins,
            fmin=fmin,
            fmax=fmax,
            ref=ref,
            amin=amin,
            top_db=top_db,
            freeze_parameters=True,
        )

        # Spec augmenter
        self.spec_augmenter = SpecAugmentation(
            time_drop_width=64,
            time_stripes_num=2,
            freq_drop_width=8,
            freq_stripes_num=2,
        )

        self.bn0 = nn.BatchNorm2d(64)

        self.conv_block1 = ConvBlock5x5(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock5x5(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock5x5(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock5x5(in_channels=256, out_channels=512)

        self.fc1 = nn.Linear(512, 512, bias=True)
        self.fc_audioset = nn.Linear(512, classes_num, bias=True)

        self.init_weight()

    def init_weight(self):
        init_bn(self.bn0)
        init_layer(self.fc1)
        init_layer(self.fc_audioset)

    def forward(self, input, mixup_lambda=None, device=None):
        """Input: (batch_size, data_length)"""

        x = self.spectrogram_extractor(input)  # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)

        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)

        if self.training:
            x = self.spec_augmenter(x)

        # Mixup on spectrogram
        if self.training and mixup_lambda is not None:
            x = do_mixup(x, mixup_lambda)

        x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = torch.mean(x, dim=3)

        latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
        latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
        latent_x = latent_x1 + latent_x2
        latent_x = latent_x.transpose(1, 2)
        latent_x = F.relu_(self.fc1(latent_x))
        latent_output = interpolate(latent_x, 16)

        (x1, _) = torch.max(x, dim=2)
        x2 = torch.mean(x, dim=2)
        x = x1 + x2
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu_(self.fc1(x))
        embedding = F.dropout(x, p=0.5, training=self.training)
        clipwise_output = torch.sigmoid(self.fc_audioset(x))

        output_dict = {
            "clipwise_output": clipwise_output,
            "embedding": embedding,
            "fine_grained_embedding": latent_output,
        }

        return output_dict

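# The ratio handed to `interpolate` mirrors each backbone's total time-axis
# downsampling: Cnn6 stacks four 2x-pooled conv blocks (16x) and therefore uses
# `interpolate(latent_x, 16)`, whereas Cnn10 below and the Cnn14 variant above
# pool five times (32x) and use `interpolate(latent_x, 32)`, restoring the
# fine-grained embedding to roughly the input mel frame rate.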
class Cnn10(nn.Module):
    def __init__(
        self,
        sample_rate,
        window_size,
        hop_size,
        mel_bins,
        fmin,
        fmax,
        classes_num,
        enable_fusion=False,
        fusion_type="None",
    ):

        super(Cnn10, self).__init__()

        window = "hann"
        center = True
        pad_mode = "reflect"
        ref = 1.0
        amin = 1e-10
        top_db = None

        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type

        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(
            n_fft=window_size,
            hop_length=hop_size,
            win_length=window_size,
            window=window,
            center=center,
            pad_mode=pad_mode,
            freeze_parameters=True,
        )

        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(
            sr=sample_rate,
            n_fft=window_size,
            n_mels=mel_bins,
            fmin=fmin,
            fmax=fmax,
            ref=ref,
            amin=amin,
            top_db=top_db,
            freeze_parameters=True,
        )

        # Spec augmenter
        self.spec_augmenter = SpecAugmentation(
            time_drop_width=64,
            time_stripes_num=2,
            freq_drop_width=8,
            freq_stripes_num=2,
        )

        self.bn0 = nn.BatchNorm2d(64)

        self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)

        self.fc1 = nn.Linear(1024, 1024, bias=True)
        self.fc_audioset = nn.Linear(1024, classes_num, bias=True)

        self.init_weight()

    def init_weight(self):
        init_bn(self.bn0)
        init_layer(self.fc1)
        init_layer(self.fc_audioset)

    def forward(self, input, mixup_lambda=None, device=None):
        """Input: (batch_size, data_length)"""

        x = self.spectrogram_extractor(input)  # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)

        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)

        if self.training:
            x = self.spec_augmenter(x)

        # Mixup on spectrogram
        if self.training and mixup_lambda is not None:
            x = do_mixup(x, mixup_lambda)

        x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block5(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = torch.mean(x, dim=3)

        latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
        latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
        latent_x = latent_x1 + latent_x2
        latent_x = latent_x.transpose(1, 2)
        latent_x = F.relu_(self.fc1(latent_x))
        latent_output = interpolate(latent_x, 32)

        (x1, _) = torch.max(x, dim=2)
        x2 = torch.mean(x, dim=2)
        x = x1 + x2
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu_(self.fc1(x))
        embedding = F.dropout(x, p=0.5, training=self.training)
        clipwise_output = torch.sigmoid(self.fc_audioset(x))

        output_dict = {
            "clipwise_output": clipwise_output,
            "embedding": embedding,
            "fine_grained_embedding": latent_output,
        }

        return output_dict

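# `create_pann_model` below resolves `audio_cfg.model_name` via `eval`, so the
# configured name must match one of the model classes defined in this module
# (such as Cnn6 or Cnn10 above).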
def create_pann_model(audio_cfg, enable_fusion=False, fusion_type="None"):
    try:
        ModelProto = eval(audio_cfg.model_name)
        model = ModelProto(
            sample_rate=audio_cfg.sample_rate,
            window_size=audio_cfg.window_size,
            hop_size=audio_cfg.hop_size,
            mel_bins=audio_cfg.mel_bins,
            fmin=audio_cfg.fmin,
            fmax=audio_cfg.fmax,
            classes_num=audio_cfg.class_num,
            enable_fusion=enable_fusion,
            fusion_type=fusion_type,
        )
        return model
    except Exception:
        raise RuntimeError(
            f"Model {audio_cfg.model_name} not found, or the audio cfg parameters are incomplete."
        )
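

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream CLAP/PANN code): the
# config values below are hypothetical and only show which attributes
# `create_pann_model` reads from `audio_cfg`.
if __name__ == "__main__":
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        model_name="Cnn10",  # any of the model classes defined above
        sample_rate=48000,
        window_size=1024,
        hop_size=480,
        mel_bins=64,
        fmin=50,
        fmax=14000,
        class_num=527,
    )
    model = create_pann_model(cfg).eval()

    # Cnn6/Cnn10 take a raw waveform tensor of shape (batch_size, data_length).
    with torch.no_grad():
        out = model(torch.randn(2, cfg.sample_rate))
    print(out["clipwise_output"].shape, out["embedding"].shape)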
