diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..133d2f3a0195f06922a8243f8482276f7406c4df 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,3 +1,6 @@ +*.png filter=lfs diff=lfs merge=lfs -text +*.jpg filter=lfs diff=lfs merge=lfs -text +*.blend filter=lfs diff=lfs merge=lfs -text *.7z filter=lfs diff=lfs merge=lfs -text *.arrow filter=lfs diff=lfs merge=lfs -text *.bin filter=lfs diff=lfs merge=lfs -text @@ -33,3 +36,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +assets/axis_ref.png filter=lfs diff=lfs merge=lfs -text +assets/axis_tgt.png filter=lfs diff=lfs merge=lfs -text +assets/axis_render.blend filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d94f7257fabd1ad5b2938f7754947635ff30a52b --- /dev/null +++ b/.gitignore @@ -0,0 +1,210 @@ +test_demo/ +test_demo_output/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[codz] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py.cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# UV +# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +#uv.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock +#poetry.toml + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python. 
+# https://pdm-project.org/en/latest/usage/project/#working-with-version-control +#pdm.lock +#pdm.toml +.pdm-python +.pdm-build/ + +# pixi +# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control. +#pixi.lock +# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one +# in the .venv directory. It is recommended not to include this directory in version control. +.pixi + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.envrc +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# Abstra +# Abstra is an AI-powered process automation framework. +# Ignore directories containing user credentials, local state, and settings. +# Learn more at https://abstra.io/docs +.abstra/ + +# Visual Studio Code +# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore +# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore +# and can be added to the global gitignore or merged into this file. However, if you prefer, +# you could uncomment the following to ignore the entire vscode folder +# .vscode/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc + +# Cursor +# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to +# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data +# refer to https://docs.cursor.com/context/ignore-files +.cursorignore +.cursorindexingignore + +# Marimo +marimo/_static/ +marimo/_lsp/ +__marimo__/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4ea99c213c5c0c005ae4e80df8e52169d06896ec --- /dev/null +++ b/LICENSE @@ -0,0 +1,395 @@ +Attribution 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. 
+ +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution 4.0 International Public License ("Public License"). To the +extent this Public License may be interpreted as a contract, You are +granted the Licensed Rights in consideration of Your acceptance of +these terms and conditions, and the Licensor grants You such rights in +consideration of benefits the Licensor receives from making the +Licensed Material available under these terms and conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. 
Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + j. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + k. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. 
If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. 
For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. 
diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..aff012c65d17d53b1441c44adb3d57d1149ac3d4 --- /dev/null +++ b/app.py @@ -0,0 +1,199 @@ +import gradio as gr +import numpy as np +from PIL import Image +import torch + +# ====== Original imports and model loading (unchanged) ====== +from paths import * +from vision_tower import VGGT_OriAny_Ref +from inference import * +from app_utils import * +from axis_renderer import BlendRenderer + +from huggingface_hub import hf_hub_download +ckpt_path = hf_hub_download(repo_id=ORIANY_V2, filename=REMOTE_CKPT_PATH, repo_type="model", cache_dir='./', resume_download=True) +print(ckpt_path) + + +mark_dtype = torch.bfloat16 if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8 else torch.float16 +# device = 'cuda:0' +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +model = VGGT_OriAny_Ref(out_dim=900, dtype=mark_dtype, nopretrain=True) +model.load_state_dict(torch.load(ckpt_path, map_location='cpu')) +model.eval() +model = model.to(device) +print('Model loaded.') + +axis_renderer = BlendRenderer(RENDER_FILE) + + +# ====== Helper: safe image handling ====== +def safe_image_input(image): + """Return a valid numpy array or None.""" + if image is None: + return None + if isinstance(image, np.ndarray): + return image + try: + return np.array(image) + except Exception: + return None + + +# ====== Inference function ====== +@torch.no_grad() +def run_inference(image_ref, image_tgt, do_rm_bkg): + image_ref = safe_image_input(image_ref) + image_tgt = safe_image_input(image_tgt) + + if image_ref is None: + raise gr.Error("Please upload a reference image before running inference.") + + # Convert to PIL (for background removal and the later axis overlay) + pil_ref = Image.fromarray(image_ref.astype(np.uint8)).convert("RGB") + pil_tgt = None + + if image_tgt is not None: + pil_tgt = Image.fromarray(image_tgt.astype(np.uint8)).convert("RGB") + if do_rm_bkg: + pil_ref = background_preprocess(pil_ref, True) + pil_tgt = background_preprocess(pil_tgt, True) + else: + if do_rm_bkg: + pil_ref = background_preprocess(pil_ref, True) + + try: + ans_dict = inf_single_case(model, pil_ref, pil_tgt) + except Exception as e: + print("Inference error:", e) + raise gr.Error(f"Inference failed: {str(e)}") + + def safe_float(val, default=0.0): + try: + return float(val) + except Exception: + return float(default) + + az = safe_float(ans_dict.get('ref_az_pred', 0)) + el = safe_float(ans_dict.get('ref_el_pred', 0)) + ro = safe_float(ans_dict.get('ref_ro_pred', 0)) + alpha = int(ans_dict.get('ref_alpha_pred', 1)) # Note: the target is rendered with alpha=1 by default, but the reference may not be + + # ===== Render the axis overlay for the reference image ===== + axis_renderer.render_axis(az, el, ro, alpha, save_path=REF_AXIS_IMAGE) + axis_ref = Image.open(REF_AXIS_IMAGE).convert("RGBA") + + # Overlay the axis onto the reference image + # Make sure the sizes match + if axis_ref.size != pil_ref.size: + axis_ref = axis_ref.resize(pil_ref.size, Image.LANCZOS) + pil_ref_rgba = pil_ref.convert("RGBA") + overlaid_ref = Image.alpha_composite(pil_ref_rgba, axis_ref).convert("RGB") + + # ===== Process the target image (if provided) ===== + if pil_tgt is not None: + rel_az = safe_float(ans_dict.get('rel_az_pred', 0)) + rel_el = safe_float(ans_dict.get('rel_el_pred', 0)) + rel_ro = safe_float(ans_dict.get('rel_ro_pred', 0)) + + tgt_azi, tgt_ele, tgt_rot = Get_target_azi_ele_rot(az, el, ro, rel_az, rel_el, rel_ro) + print("Target: Azi",tgt_azi,"Ele",tgt_ele,"Rot",tgt_rot) + + # The target is always rendered with alpha=1 + axis_renderer.render_axis(tgt_azi, tgt_ele, tgt_rot, alpha=1, save_path=TGT_AXIS_IMAGE) + axis_tgt = Image.open(TGT_AXIS_IMAGE).convert("RGBA") + + if axis_tgt.size != pil_tgt.size: + axis_tgt = axis_tgt.resize(pil_tgt.size,
Image.LANCZOS) + pil_tgt_rgba = pil_tgt.convert("RGBA") + overlaid_tgt = Image.alpha_composite(pil_tgt_rgba, axis_tgt).convert("RGB") + else: + overlaid_tgt = None + rel_az = rel_el = rel_ro = 0.0 + + return [ + overlaid_ref, # reference image with the axis overlay + overlaid_tgt, # target image with the axis overlay (may be None) + f"{az:.2f}", + f"{el:.2f}", + f"{ro:.2f}", + str(alpha), + f"{rel_az:.2f}", + f"{rel_el:.2f}", + f"{rel_ro:.2f}", + ] + + +# ====== Gradio Blocks UI ====== +with gr.Blocks(title="Orient-Anything Demo") as demo: + gr.Markdown("# Orient-Anything Demo") + gr.Markdown("Upload a **reference image** (required). Optionally upload a **target image** for relative pose.") + + with gr.Row(): + # Left: input images (reference + target, in one row) + with gr.Column(): + with gr.Row(): + ref_img = gr.Image( + label="Reference Image (required)", + type="numpy", + height=256, + width=256, + value=None, + interactive=True + ) + tgt_img = gr.Image( + label="Target Image (optional)", + type="numpy", + height=256, + width=256, + value=None, + interactive=True + ) + rm_bkg = gr.Checkbox(label="Remove Background", value=True) + run_btn = gr.Button("Run Inference", variant="primary") + + # Right: result images + text outputs + with gr.Column(): + # Result images: reference result + target result (optional) + with gr.Row(): + res_ref_img = gr.Image( + label="Rendered Reference", + type="pil", + height=256, + width=256, + interactive=False + ) + res_tgt_img = gr.Image( + label="Rendered Target (if provided)", + type="pil", + height=256, + width=256, + interactive=False + ) + + # Text outputs below the images + with gr.Row(): + with gr.Column(): + gr.Markdown("### Absolute Pose (Reference)") + az_out = gr.Textbox(label="Azimuth (0~360°)") + el_out = gr.Textbox(label="Polar (-90~90°)") + ro_out = gr.Textbox(label="Rotation (-90~90°)") + alpha_out = gr.Textbox(label="Number of Directions (0/1/2/4)") + with gr.Column(): + gr.Markdown("### Relative Pose (Target w.r.t Reference)") + rel_az_out = gr.Textbox(label="Relative Azimuth (0~360°)") + rel_el_out = gr.Textbox(label="Relative Polar (-90~90°)") + rel_ro_out = gr.Textbox(label="Relative Rotation (-90~90°)") + + # Wire up the click event + run_btn.click( + fn=run_inference, + inputs=[ref_img, tgt_img, rm_bkg], + outputs=[res_ref_img, res_tgt_img, az_out, el_out, ro_out, alpha_out, rel_az_out, rel_el_out, rel_ro_out], + preprocess=True, + postprocess=True + ) + +# Launch (disable the API to avoid schema errors) +demo.launch(show_api=False) diff --git a/app_utils.py b/app_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8d296f9bf9b63a8dc0ca26ee80c257c79f4b6217 --- /dev/null +++ b/app_utils.py @@ -0,0 +1,202 @@ +import rembg +import random +import torch +import numpy as np +from PIL import Image, ImageOps +import PIL +from typing import Any +import matplotlib.pyplot as plt +import io + +def resize_foreground( + image: Image, + ratio: float, +) -> Image: + image = np.array(image) + assert image.shape[-1] == 4 + alpha = np.where(image[..., 3] > 0) + y1, y2, x1, x2 = ( + alpha[0].min(), + alpha[0].max(), + alpha[1].min(), + alpha[1].max(), + ) + # crop the foreground + fg = image[y1:y2, x1:x2] + # pad to square + size = max(fg.shape[0], fg.shape[1]) + ph0, pw0 = (size - fg.shape[0]) // 2, (size - fg.shape[1]) // 2 + ph1, pw1 = size - fg.shape[0] - ph0, size - fg.shape[1] - pw0 + new_image = np.pad( + fg, + ((ph0, ph1), (pw0, pw1), (0, 0)), + mode="constant", + constant_values=((0, 0), (0, 0), (0, 0)), + ) + + # compute padding according to the ratio + new_size = int(new_image.shape[0] / ratio) + # pad to size, double side + ph0, pw0 = (new_size - size) // 2, (new_size - size) // 2 + ph1, pw1 = new_size - size -
ph0, new_size - size - pw0 + new_image = np.pad( + new_image, + ((ph0, ph1), (pw0, pw1), (0, 0)), + mode="constant", + constant_values=((0, 0), (0, 0), (0, 0)), + ) + new_image = Image.fromarray(new_image) + return new_image + +def remove_background(image: Image, + rembg_session: Any = None, + force: bool = False, + **rembg_kwargs, +) -> Image: + do_remove = True + if image.mode == "RGBA" and image.getextrema()[3][0] < 255: + do_remove = False + do_remove = do_remove or force + if do_remove: + image = rembg.remove(image, session=rembg_session, **rembg_kwargs) + return image + +def background_preprocess(input_image, do_remove_background): + if input_image is None: + return None + rembg_session = rembg.new_session() if do_remove_background else None + + if do_remove_background: + input_image = remove_background(input_image, rembg_session) + input_image = resize_foreground(input_image, 0.85) + + return input_image + +def axis_angle_rotation_batch(axis: torch.Tensor, theta: torch.Tensor, homogeneous: bool = False) -> torch.Tensor: + """ + Batched version: + Args: + axis: (3,) or (N,3) + theta: scalar or (N,) + homogeneous: whether to return a 4x4 homogeneous matrix + + Returns: + (N,3,3) or (N,4,4) + """ + axis = torch.as_tensor(axis).float() + theta = torch.as_tensor(theta).float() + + if axis.ndim == 1: + axis = axis.unsqueeze(0) # (1,3) + if theta.ndim == 0: + theta = theta.unsqueeze(0) # (1,) + + N = axis.shape[0] + + # normalize axis + axis = axis / torch.norm(axis, dim=1, keepdim=True) + + x, y, z = axis[:, 0], axis[:, 1], axis[:, 2] + cos_t = torch.cos(theta) + sin_t = torch.sin(theta) + one_minus_cos = 1 - cos_t + + # Rodrigues' rotation formula, written out entry by entry + rot = torch.zeros((N, 3, 3), dtype=axis.dtype, device=axis.device) + rot[:, 0, 0] = cos_t + x*x*one_minus_cos + rot[:, 0, 1] = x*y*one_minus_cos - z*sin_t + rot[:, 0, 2] = x*z*one_minus_cos + y*sin_t + rot[:, 1, 0] = y*x*one_minus_cos + z*sin_t + rot[:, 1, 1] = cos_t + y*y*one_minus_cos + rot[:, 1, 2] = y*z*one_minus_cos - x*sin_t + rot[:, 2, 0] = z*x*one_minus_cos - y*sin_t + rot[:, 2, 1] = z*y*one_minus_cos + x*sin_t + rot[:, 2, 2] = cos_t + z*z*one_minus_cos + + if homogeneous: + rot_homo = torch.eye(4, dtype=axis.dtype, device=axis.device).unsqueeze(0).repeat(N, 1, 1) + rot_homo[:, :3, :3] = rot + return rot_homo + + return rot + +def azi_ele_rot_to_Obj_Rmatrix_batch(azi: torch.Tensor, ele: torch.Tensor, rot: torch.Tensor) -> torch.Tensor: + """Batched: (azi, ele, rot) in degrees -> rotation matrix (N,3,3)""" + # convert to tensors, degrees -> radians + azi = torch.as_tensor(azi).float() * torch.pi / 180. + ele = torch.as_tensor(ele).float() * torch.pi / 180. + rot = torch.as_tensor(rot).float() * torch.pi / 180.
+ + # ensure a batch dimension + if azi.ndim == 0: + azi = azi.unsqueeze(0) + if ele.ndim == 0: + ele = ele.unsqueeze(0) + if rot.ndim == 0: + rot = rot.unsqueeze(0) + + N = azi.shape[0] + + device = azi.device + dtype = azi.dtype + + z0_axis = torch.tensor([0.,0.,1.], device=device, dtype=dtype).expand(N, -1) + y0_axis = torch.tensor([0.,1.,0.], device=device, dtype=dtype).expand(N, -1) + x0_axis = torch.tensor([1.,0.,0.], device=device, dtype=dtype).expand(N, -1) + # print(z0_axis.shape, azi.shape) + R_azi = axis_angle_rotation_batch(z0_axis, -1 * azi) + R_ele = axis_angle_rotation_batch(y0_axis, ele) + R_rot = axis_angle_rotation_batch(x0_axis, rot) + + R_res = R_rot @ R_ele @ R_azi + return R_res + +def Cam_Rmatrix_to_azi_ele_rot_batch(R: torch.Tensor): + """Batched: rotation matrix -> (azi, ele, rot), in degrees""" + R = torch.as_tensor(R).float() + + # if the shape is (3,3), add a batch dimension + if R.ndim == 2: + R = R.unsqueeze(0) + + r0 = R[:, :, 0] # shape (N,3) + r1 = R[:, :, 1] + r2 = R[:, :, 2] + + ele = torch.asin(r0[:, 2]) # r0.z + cos_ele = torch.cos(ele) + + # default azi and rot + azi = torch.zeros_like(ele) + rot = torch.zeros_like(ele) + + # regular case + normal_mask = (cos_ele.abs() >= 1e-6) + if normal_mask.any(): + azi[normal_mask] = torch.atan2(r0[normal_mask, 1], r0[normal_mask, 0]) + rot[normal_mask] = torch.atan2(-r1[normal_mask, 2], r2[normal_mask, 2]) + + # gimbal-lock special case + gimbal_mask = ~normal_mask + if gimbal_mask.any(): + # set azi to 0 in this case + azi[gimbal_mask] = 0.0 + rot[gimbal_mask] = torch.atan2(-r1[gimbal_mask, 0], r1[gimbal_mask, 1]) + + # radians -> degrees + azi = azi * 180. / torch.pi + ele = ele * 180. / torch.pi + rot = rot * 180. / torch.pi + + return azi, ele, rot + +def Get_target_azi_ele_rot(azi: torch.Tensor, ele: torch.Tensor, rot: torch.Tensor, rel_azi: torch.Tensor, rel_ele: torch.Tensor, rel_rot: torch.Tensor): + Rmat0 = azi_ele_rot_to_Obj_Rmatrix_batch(azi = azi , ele = ele , rot = rot) + Rmat_rel = azi_ele_rot_to_Obj_Rmatrix_batch(azi = rel_azi, ele = rel_ele, rot = rel_rot) + # Rmat_rel = Rmat1 @ Rmat0.permute(0, 2, 1) + # azi_out, ele_out, rot_out = Cam_Rmatrix_to_azi_ele_rot_batch(Rmat_rel.permute(0, 2, 1)) + + Rmat1 = Rmat_rel @ Rmat0 + azi_out, ele_out, rot_out = Cam_Rmatrix_to_azi_ele_rot_batch(Rmat1.permute(0, 2, 1)) + + return azi_out, ele_out, rot_out diff --git a/assets/axis_ref.png b/assets/axis_ref.png new file mode 100644 index 0000000000000000000000000000000000000000..595d29d663570f9b7aa9ddb5420fc48c24199284 --- /dev/null +++ b/assets/axis_ref.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ac0eb370f3d33fb8d6fc5c4e309b35f38a879d4a51e34ab490a35d39d09b1fa +size 139793 diff --git a/assets/axis_render.blend b/assets/axis_render.blend new file mode 100644 index 0000000000000000000000000000000000000000..2ca11e19cb27e33da7354d0f1332458de2f3bfa9 --- /dev/null +++ b/assets/axis_render.blend @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76f8fd3b4ce574a6973ed9637a6c8194fcf46edf72f9266786036c21cf7023a1 +size 2136460 diff --git a/assets/axis_tgt.png b/assets/axis_tgt.png new file mode 100644 index 0000000000000000000000000000000000000000..bfb0108af3e564d4a17f415a083e0c9953694656 --- /dev/null +++ b/assets/axis_tgt.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39fb1b1e9ef7e16ff25c4d1d9df8fd53ec14f9e7006356db0609ab9c2ee9c048 +size 132931 diff --git a/axis_renderer.py b/axis_renderer.py new file mode 100644 index 0000000000000000000000000000000000000000..8f8f27047be255b8b2fa7f23c52890264cc7f41e --- /dev/null +++ b/axis_renderer.py @@
-0,0 +1,136 @@ +import bpy +import math +import os +from paths import * + +class BlendRenderer: + def __init__(self, blend_file_path=RENDER_FILE): + """ + Initialize the renderer: load the given .blend file and apply the basic render settings. + + :param blend_file_path: full path of the .blend file to load + """ + if not os.path.isfile(blend_file_path): + raise FileNotFoundError(f"Blend file not found: {blend_file_path}") + + # Load the blend file + bpy.ops.wm.open_mainfile(filepath=blend_file_path) + + # Use the Cycles render engine + bpy.context.scene.render.engine = 'CYCLES' + + # Render on the CPU + bpy.context.scene.cycles.device = 'CPU' + + # Use 4 samples + bpy.context.scene.cycles.samples = 4 + + # Limit all bounces to 4 (diffuse, glossy, transmission, etc.) + bpy.context.scene.cycles.max_bounces = 4 + + # Render resolution + bpy.context.scene.render.resolution_x = 512 + bpy.context.scene.render.resolution_y = 512 + bpy.context.scene.render.resolution_percentage = 100 + + # Enable a transparent background (RGBA) + bpy.context.scene.render.film_transparent = True + + # Walk all objects and initialize their render visibility + for obj in bpy.data.objects: + if obj.type == 'LIGHT': + obj.hide_render = False + elif obj.type == 'CAMERA': + obj.hide_render = False + elif obj.type == 'MESH': + obj.hide_render = True # hide every mesh from rendering by default + + # Set the active camera (pick the first one) + cameras = [obj for obj in bpy.data.objects if obj.type == 'CAMERA'] + if cameras: + bpy.context.scene.camera = cameras[0] + + print(f"Loaded blend file: {blend_file_path}") + print("Render settings applied: 512x512, CPU, samples=4, bounces=4, transparent background.") + + self.alpha_axis_map = { # values are object names inside the .blend scene + 0: "单轴平面", + 1: "三轴", + 2: "双向标注", + 4: "四向标注" + } + + + def _get_all_children(self, obj): + """Recursively collect all descendants of an object (including nested children).""" + children = [] + for child in obj.children: + children.append(child) + children.extend(self._get_all_children(child)) + return children + + def render_axis(self, azi, ele, rot, alpha, save_path): + """ + Render the axis gizmo at a specific orientation. + + :param azi: azimuth (rotation about the Z axis, in degrees) + :param ele: elevation (rotation about the Y axis, in degrees) + :param rot: roll (rotation about the X axis, in degrees) + :param save_path: path to save the rendered result (e.g. '/output/render.png') + """ + # Walk all objects and reset their render visibility + for obj in bpy.data.objects: + if obj.type == 'LIGHT': + obj.hide_render = False + elif obj.type == 'CAMERA': + obj.hide_render = False + elif obj.type == 'MESH': + obj.hide_render = True # hide every mesh from rendering by default + # Pick the target object based on alpha + target_name = self.alpha_axis_map.get(alpha, "单轴平面") + target_obj = None + for obj in bpy.data.objects: + # if obj.type == 'MESH' and obj.name == target_name: + if obj.name == target_name: + target_obj = obj + break + + if target_obj is None: + raise ValueError(f'Object named "{target_name}" not found in the scene.') + + # Collect the object and all of its children + all_objects_to_render = [target_obj] + self._get_all_children(target_obj) + + # Make them visible to the renderer + for obj in all_objects_to_render: + if obj.type == 'MESH': + obj.hide_render = False + + # Set the rotation (ZYX order: Z=azi, Y=ele, X=rot -> Euler components = (rot, ele, azi)) + # Note: Blender expects radians + target_obj.rotation_mode = 'ZYX' # use ZYX Euler rotation order + target_obj.rotation_euler = (rot*math.pi/180, ele*math.pi/180, -azi*math.pi/180) + + # Make sure the output directory exists + os.makedirs(os.path.dirname(save_path), exist_ok=True) + + # Set the output path + bpy.context.scene.render.filepath = save_path + + # Render and save + bpy.ops.render.render(write_still=True) + + print(f"Rendered and saved to: {save_path}") + + +if __name__ == "__main__": + renderer = BlendRenderer(RENDER_FILE) + # Example usage: + renderer.render_axis(45, 0, 0, 1, "./test_demo_output/render_1_dir_azi45.png") + renderer.render_axis(0, 45, 0, 2, "./test_demo_output/render_2_dir_ele45.png") + renderer.render_axis(0, 0, 45, 4, "./test_demo_output/render_4_dir_rot45.png") + # renderer.render_1_dir() + #
renderer.render_2_dir() + # renderer.render_4_dir() + + diff --git a/inference.py b/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..c777e66d2f4e7bb31920ba5e02c95e2ee83ce615 --- /dev/null +++ b/inference.py @@ -0,0 +1,238 @@ +import torch +from PIL import Image +from app_utils import * +import torch.nn.functional as F +import numpy as np +from torchvision import transforms as TF + +from scipy.special import i0 +from scipy.optimize import curve_fit +from scipy.integrate import trapezoid +from functools import partial + +def von_mises_pdf_alpha_numpy(alpha, x, mu, kappa): + normalization = 2 * np.pi + pdf = np.exp(kappa * np.cos(alpha * (x - mu))) / normalization + return pdf + +def val_fit_alpha(distribute): + fit_alphas = [] + for y_noise in distribute: + x = np.linspace(0, 2 * np.pi, 360) + y_noise /= trapezoid(y_noise, x) + 1e-8 + + initial_guess = [x[np.argmax(y_noise)], 1] + + # support 1,2,4 + alphas = [1.0, 2.0, 4.0] + saved_params = [] + saved_r_squared = [] + + for alpha in alphas: + try: + von_mises_pdf_alpha_partial = partial(von_mises_pdf_alpha_numpy, alpha) + params, covariance = curve_fit(von_mises_pdf_alpha_partial, x, y_noise, p0=initial_guess) + + residuals = y_noise - von_mises_pdf_alpha_partial(x, *params) + ss_res = np.sum(residuals**2) + ss_tot = np.sum((y_noise - np.mean(y_noise))**2) + r_squared = 1 - (ss_res / (ss_tot+1e-8)) + + saved_params.append(params) + saved_r_squared.append(r_squared) + if r_squared > 0.8: + break + except: + saved_params.append((0.,0.)) + saved_r_squared.append(0.) + + max_index = np.argmax(saved_r_squared) + alpha = alphas[max_index] + mu_fit, kappa_fit = saved_params[max_index] + r_squared = saved_r_squared[max_index] + + if alpha == 1. and kappa_fit>=0.5 and r_squared>=0.5: + pass + elif alpha == 2. and kappa_fit>=0.35 and r_squared>=0.35: + pass + elif alpha == 4. and kappa_fit>=0.25 and r_squared>=0.25: + pass + else: + alpha=0. 
+ fit_alphas.append(alpha) + return torch.tensor(fit_alphas) + +def preprocess_images(image_list, mode="crop"): + + # Check for empty list + if len(image_list) == 0: + raise ValueError("At least 1 image is required") + + # Validate mode + if mode not in ["crop", "pad"]: + raise ValueError("Mode must be either 'crop' or 'pad'") + + images = [] + shapes = set() + to_tensor = TF.ToTensor() + target_size = 518 + + # First process all images and collect their shapes + # for image_path in image_path_list: + for img in image_list: + # If there's an alpha channel, blend onto white background: + if img.mode == "RGBA": + # Create white background + background = Image.new("RGBA", img.size, (255, 255, 255, 255)) + # Alpha composite onto the white background + img = Image.alpha_composite(background, img) + + # Now convert to "RGB" (this step assigns white for transparent areas) + img = img.convert("RGB") + width, height = img.size + + if mode == "pad": + # Make the largest dimension 518px while maintaining aspect ratio + if width >= height: + new_width = target_size + new_height = round(height * (new_width / width) / 14) * 14 # Make divisible by 14 + else: + new_height = target_size + new_width = round(width * (new_height / height) / 14) * 14 # Make divisible by 14 + else: # mode == "crop" + # Original behavior: set width to 518px + new_width = target_size + # Calculate height maintaining aspect ratio, divisible by 14 + new_height = round(height * (new_width / width) / 14) * 14 + + # Resize with new dimensions (width, height) + try: + img = img.resize((new_width, new_height), Image.Resampling.BICUBIC) + img = to_tensor(img) # Convert to tensor (0, 1) + except Exception as e: + print(e) + print(width, height) + print(new_width, new_height) + assert False + + # Center crop height if it's larger than 518 (only in crop mode) + if mode == "crop" and new_height > target_size: + start_y = (new_height - target_size) // 2 + img = img[:, start_y : start_y + target_size, :] + + # For pad mode, pad to make a square of target_size x target_size + if mode == "pad": + h_padding = target_size - img.shape[1] + w_padding = target_size - img.shape[2] + + if h_padding > 0 or w_padding > 0: + pad_top = h_padding // 2 + pad_bottom = h_padding - pad_top + pad_left = w_padding // 2 + pad_right = w_padding - pad_left + + # Pad with white (value=1.0) + img = torch.nn.functional.pad( + img, (pad_left, pad_right, pad_top, pad_bottom), mode="constant", value=1.0 + ) + + shapes.add((img.shape[1], img.shape[2])) + images.append(img) + + # Check if we have different shapes + # In theory our model can also work well with different shapes + if len(shapes) > 1: + print(f"Warning: Found images with different shapes: {shapes}") + # Find maximum dimensions + max_height = max(shape[0] for shape in shapes) + max_width = max(shape[1] for shape in shapes) + + # Pad images if necessary + padded_images = [] + for img in images: + h_padding = max_height - img.shape[1] + w_padding = max_width - img.shape[2] + + if h_padding > 0 or w_padding > 0: + pad_top = h_padding // 2 + pad_bottom = h_padding - pad_top + pad_left = w_padding // 2 + pad_right = w_padding - pad_left + + img = torch.nn.functional.pad( + img, (pad_left, pad_right, pad_top, pad_bottom), mode="constant", value=1.0 + ) + padded_images.append(img) + images = padded_images + + images = torch.stack(images) # concatenate images + + # Ensure correct shape when single image + if len(image_list) == 1: + # Verify shape is (1, C, H, W) + if images.dim() == 3: + images = images.unsqueeze(0) + 
+ return images + +@torch.no_grad() +def inf_single_batch(model, batch): + device = model.get_device() + batch_img_inputs = batch # (B, S, 3, H, W) + # print(batch_img_inputs.shape) + B, S, C, H, W = batch_img_inputs.shape + pose_enc = model(batch_img_inputs) # (B, S, D) S = 1 + + pose_enc = pose_enc.view(B*S, -1) + angle_az_pred = torch.argmax(pose_enc[:, 0:360] , dim=-1) + angle_el_pred = torch.argmax(pose_enc[:, 360:360+180] , dim=-1) - 90 + angle_ro_pred = torch.argmax(pose_enc[:, 360+180:360+180+360] , dim=-1) - 180 + + # ori_val + # trained with BCE loss + distribute = F.sigmoid(pose_enc[:, 0:360]).cpu().float().numpy() + # trained with CE loss + # distribute = pose_enc[:, 0:360].cpu().float().numpy() + alpha_pred = val_fit_alpha(distribute = distribute) + + # ref_val + if S > 1: + ref_az_pred = angle_az_pred.reshape(B,S)[:,0] + ref_el_pred = angle_el_pred.reshape(B,S)[:,0] + ref_ro_pred = angle_ro_pred.reshape(B,S)[:,0] + ref_alpha_pred = alpha_pred.reshape(B,S)[:,0] + rel_az_pred = angle_az_pred.reshape(B,S)[:,1] + rel_el_pred = angle_el_pred.reshape(B,S)[:,1] + rel_ro_pred = angle_ro_pred.reshape(B,S)[:,1] + else: + ref_az_pred = angle_az_pred[0] + ref_el_pred = angle_el_pred[0] + ref_ro_pred = angle_ro_pred[0] + ref_alpha_pred = alpha_pred[0] + rel_az_pred = 0. + rel_el_pred = 0. + rel_ro_pred = 0. + + ans_dict = { + 'ref_az_pred': ref_az_pred, + 'ref_el_pred': ref_el_pred, + 'ref_ro_pred': ref_ro_pred, + 'ref_alpha_pred' : ref_alpha_pred, + 'rel_az_pred' : rel_az_pred, + 'rel_el_pred' : rel_el_pred, + 'rel_ro_pred' : rel_ro_pred, + } + + return ans_dict + +# input PIL Image +@torch.no_grad() +def inf_single_case(model, image_ref, image_tgt): + if image_tgt is None: + image_list = [image_ref] + else: + image_list = [image_ref, image_tgt] + image_tensors = preprocess_images(image_list, mode="pad").to(model.get_device()) + ans_dict = inf_single_batch(model=model, batch=image_tensors.unsqueeze(0)) + print(ans_dict) + return ans_dict diff --git a/orianyV2_demo.ipynb b/orianyV2_demo.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..c39d2597f8020b9ee9683b3a68f2743218b6a872 --- /dev/null +++ b/orianyV2_demo.ipynb @@ -0,0 +1,492 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "load over\n" + ] + } + ], + "source": [ + "import torch\n", + "from vision_tower import VGGT_OriAny_Ref\n", + "import os\n", + "from app_utils import *\n", + "from paths import *\n", + "\n", + "device = 'cuda:0'\n", + "\n", + "mark_dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16\n", + "model = VGGT_OriAny_Ref(\n", + " out_dim = 900,\n", + " dtype = mark_dtype,\n", + " nopretrain = True\n", + " )\n", + "\n", + "ckpt = torch.load(LOCAL_CKPT_PATH, map_location='cpu')\n", + "# ckpt = torch.load('verwoIN3D0.pt', map_location='cpu')\n", + "\n", + "model.load_state_dict(ckpt)\n", + "model.eval()\n", + "model = model.to(device)\n", + "image_root = '/mnt/workspace/muang/repos/OriAnyV2_Train/demo/'\n", + "\n", + "print('load over')\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import rembg\n", + "from PIL import Image, ImageOps\n", + "from typing import Any, Optional, List, Dict, Union\n", + "from torchvision import transforms as TF\n", + "import torch.nn.functional as F\n", + "\n", + "rembg_session = rembg.new_session()\n", + "\n", + "def 
load_and_preprocess_images(image_path_list, mode=\"crop\"):\n", + " \"\"\"\n", + " A quick start function to load and preprocess images for model input.\n", + " This assumes the images should have the same shape for easier batching, but our model can also work well with different shapes.\n", + "\n", + " Args:\n", + " image_path_list (list): List of paths to image files\n", + " mode (str, optional): Preprocessing mode, either \"crop\" or \"pad\".\n", + " - \"crop\" (default): Sets width to 518px and center crops height if needed.\n", + " - \"pad\": Preserves all pixels by making the largest dimension 518px\n", + " and padding the smaller dimension to reach a square shape.\n", + "\n", + " Returns:\n", + " torch.Tensor: Batched tensor of preprocessed images with shape (N, 3, H, W)\n", + "\n", + " Raises:\n", + " ValueError: If the input list is empty or if mode is invalid\n", + "\n", + " Notes:\n", + " - Images with different dimensions will be padded with white (value=1.0)\n", + " - A warning is printed when images have different shapes\n", + " - When mode=\"crop\": The function ensures width=518px while maintaining aspect ratio\n", + " and height is center-cropped if larger than 518px\n", + " - When mode=\"pad\": The function ensures the largest dimension is 518px while maintaining aspect ratio\n", + " and the smaller dimension is padded to reach a square shape (518x518)\n", + " - Dimensions are adjusted to be divisible by 14 for compatibility with model requirements\n", + " \"\"\"\n", + " # Check for empty list\n", + " if len(image_path_list) == 0:\n", + " raise ValueError(\"At least 1 image is required\")\n", + " \n", + " # Validate mode\n", + " if mode not in [\"crop\", \"pad\"]:\n", + " raise ValueError(\"Mode must be either 'crop' or 'pad'\")\n", + "\n", + " images = []\n", + " shapes = set()\n", + " to_tensor = TF.ToTensor()\n", + " target_size = 518\n", + "\n", + " # First process all images and collect their shapes\n", + " for item in image_path_list:\n", + " if isinstance(item, Image.Image):\n", + " img = item # already a PIL Image, use it directly\n", + " else:\n", + " img = Image.open(item) # otherwise treat it as a path and open it\n", + "\n", + " # If there's an alpha channel, blend onto white background:\n", + " if img.mode == \"RGBA\":\n", + " # Create white background\n", + " background = Image.new(\"RGBA\", img.size, (255, 255, 255, 255))\n", + " # Alpha composite onto the white background\n", + " img = Image.alpha_composite(background, img)\n", + "\n", + " # Now convert to \"RGB\" (this step assigns white for transparent areas)\n", + " img = img.convert(\"RGB\")\n", + "\n", + " width, height = img.size\n", + " \n", + " if mode == \"pad\":\n", + " # Make the largest dimension 518px while maintaining aspect ratio\n", + " if width >= height:\n", + " new_width = target_size\n", + " new_height = round(height * (new_width / width) / 14) * 14 # Make divisible by 14\n", + " else:\n", + " new_height = target_size\n", + " new_width = round(width * (new_height / height) / 14) * 14 # Make divisible by 14\n", + " else: # mode == \"crop\"\n", + " # Original behavior: set width to 518px\n", + " new_width = target_size\n", + " # Calculate height maintaining aspect ratio, divisible by 14\n", + " new_height = round(height * (new_width / width) / 14) * 14\n", + "\n", + " # Resize with new dimensions (width, height)\n", + " img = img.resize((new_width, new_height), Image.Resampling.BICUBIC)\n", + " img = to_tensor(img) # Convert to tensor (0, 1)\n", + "\n", + " # Center crop height if it's larger than 518 (only in crop mode)\n", + " if
mode == \"crop\" and new_height > target_size:\n", + " start_y = (new_height - target_size) // 2\n", + " img = img[:, start_y : start_y + target_size, :]\n", + " \n", + " # For pad mode, pad to make a square of target_size x target_size\n", + " if mode == \"pad\":\n", + " h_padding = target_size - img.shape[1]\n", + " w_padding = target_size - img.shape[2]\n", + " \n", + " if h_padding > 0 or w_padding > 0:\n", + " pad_top = h_padding // 2\n", + " pad_bottom = h_padding - pad_top\n", + " pad_left = w_padding // 2\n", + " pad_right = w_padding - pad_left\n", + " \n", + " # Pad with white (value=1.0)\n", + " img = torch.nn.functional.pad(\n", + " img, (pad_left, pad_right, pad_top, pad_bottom), mode=\"constant\", value=1.0\n", + " )\n", + "\n", + " shapes.add((img.shape[1], img.shape[2]))\n", + " images.append(img)\n", + "\n", + " # Check if we have different shapes\n", + " # In theory our model can also work well with different shapes\n", + " if len(shapes) > 1:\n", + " print(f\"Warning: Found images with different shapes: {shapes}\")\n", + " # Find maximum dimensions\n", + " max_height = max(shape[0] for shape in shapes)\n", + " max_width = max(shape[1] for shape in shapes)\n", + "\n", + " # Pad images if necessary\n", + " padded_images = []\n", + " for img in images:\n", + " h_padding = max_height - img.shape[1]\n", + " w_padding = max_width - img.shape[2]\n", + "\n", + " if h_padding > 0 or w_padding > 0:\n", + " pad_top = h_padding // 2\n", + " pad_bottom = h_padding - pad_top\n", + " pad_left = w_padding // 2\n", + " pad_right = w_padding - pad_left\n", + "\n", + " img = torch.nn.functional.pad(\n", + " img, (pad_left, pad_right, pad_top, pad_bottom), mode=\"constant\", value=1.0\n", + " )\n", + " padded_images.append(img)\n", + " images = padded_images\n", + "\n", + " images = torch.stack(images) # concatenate images\n", + "\n", + " # Ensure correct shape when single image\n", + " if len(image_path_list) == 1:\n", + " # Verify shape is (1, C, H, W)\n", + " if images.dim() == 3:\n", + " images = images.unsqueeze(0)\n", + "\n", + " return images\n", + "\n", + "def remove_background(image: Image, rembg_session: Any=None, force: bool = False, **rembg_kwargs) -> Image :\n", + " do_remove = True\n", + " if image.mode == 'RGBA' and image.getextrema()[3][0] < 255:\n", + " do_remove = False\n", + " do_remove = do_remove or force\n", + " if do_remove:\n", + " image = rembg.remove(image, session = rembg_session, **rembg_kwargs)\n", + " return image\n", + "\n", + "\n", + "from scipy.special import i0\n", + "from scipy.optimize import curve_fit\n", + "from scipy.integrate import trapezoid\n", + "from functools import partial\n", + "\n", + "def von_mises_pdf_alpha_numpy(alpha, x, mu, kappa):\n", + " normalization = 2 * np.pi\n", + " pdf = np.exp(kappa * np.cos(alpha * (x - mu))) / normalization\n", + " return pdf\n", + "\n", + "def val_fit_alpha(distribute):\n", + " fit_alphas = []\n", + " for y_noise in distribute:\n", + " x = np.linspace(0, 2 * np.pi, 360)\n", + " y_noise /= trapezoid(y_noise, x) + 1e-8\n", + " \n", + " initial_guess = [x[np.argmax(y_noise)], 1]\n", + "\n", + " alphas = [1.0, 2.0, 4.0]\n", + " saved_params = []\n", + " saved_r_squared = []\n", + "\n", + " for alpha in alphas:\n", + " try:\n", + " von_mises_pdf_alpha_partial = partial(von_mises_pdf_alpha_numpy, alpha)\n", + " params, covariance = curve_fit(von_mises_pdf_alpha_partial, x, y_noise, p0=initial_guess)\n", + "\n", + " residuals = y_noise - von_mises_pdf_alpha_partial(x, *params)\n", + " ss_res = 
np.sum(residuals**2)\n", + " ss_tot = np.sum((y_noise - np.mean(y_noise))**2)\n", + " r_squared = 1 - (ss_res / (ss_tot+1e-8))\n", + "\n", + " saved_params.append(params)\n", + " saved_r_squared.append(r_squared)\n", + " if r_squared > 0.8:\n", + " break\n", + " except:\n", + " saved_params.append((0.,0.))\n", + " saved_r_squared.append(0.)\n", + "\n", + " max_index = np.argmax(saved_r_squared)\n", + " alpha = alphas[max_index]\n", + " mu_fit, kappa_fit = saved_params[max_index]\n", + " r_squared = saved_r_squared[max_index]\n", + " \n", + " print(alpha, mu_fit, kappa_fit, r_squared)\n", + " if alpha == 1. and kappa_fit>=0.5 and r_squared>=0.5:\n", + " pass\n", + " elif alpha == 2. and kappa_fit>=0.35 and r_squared>=0.35:\n", + " pass\n", + " elif alpha == 4. and kappa_fit>=0.25 and r_squared>=0.25:\n", + " pass\n", + " else:\n", + " alpha=0.\n", + " fit_alphas.append(alpha)\n", + " return torch.tensor(fit_alphas)\n", + "\n", + "@torch.no_grad()\n", + "def ref_single(ref_name, tgt_name, remove_bkg = True, softmax = False):\n", + " ref_img = Image.open(ref_name)\n", + " tgt_img = Image.open(tgt_name)\n", + " if remove_bkg:\n", + " ref_img = remove_background(ref_img, rembg_session, force=True)\n", + " tgt_img = remove_background(tgt_img, rembg_session, force=True)\n", + " \n", + " batch_img_inputs = load_and_preprocess_images([ref_img, tgt_img], mode=\"pad\")\n", + " \n", + " batch_img_inputs = batch_img_inputs.unsqueeze(0).to(device)\n", + " # print(batch_img_inputs.shape)\n", + " B, S, C, H, W = batch_img_inputs.shape\n", + " pose_enc = model(batch_img_inputs) # (B, S, D) S = 1\n", + "\n", + " pose_enc = pose_enc.view(B*S, -1)\n", + "\n", + " angle_az_pred = torch.argmax(pose_enc[:, 0:360] , dim=-1)\n", + " angle_el_pred = torch.argmax(pose_enc[:, 360:360+180] , dim=-1) - 90\n", + " angle_ro_pred = torch.argmax(pose_enc[:, 360+180:360+180+360] , dim=-1) - 180\n", + " if softmax:\n", + " alpha_pred = val_fit_alpha(distribute = F.softmax(pose_enc[:, 0:360], dim=-1).cpu().float().numpy())\n", + " else:\n", + " alpha_pred = val_fit_alpha(distribute = F.sigmoid(pose_enc[:, 0:360]).cpu().float().numpy())\n", + "\n", + " ori_az = (angle_az_pred.reshape(B,S)[:,0]).cpu().float().numpy()\n", + " ori_el = (angle_el_pred.reshape(B,S)[:,0]).cpu().float().numpy()\n", + " ori_ro = (angle_ro_pred.reshape(B,S)[:,0]).cpu().float().numpy()\n", + " rel_az = (angle_az_pred.reshape(B,S)[:,1]).cpu().float().numpy()\n", + " rel_el = (angle_el_pred.reshape(B,S)[:,1]).cpu().float().numpy()\n", + " rel_ro = (angle_ro_pred.reshape(B,S)[:,1]).cpu().float().numpy()\n", + " \n", + " print('ori_az', ori_az)\n", + " print('ori_el', ori_el)\n", + " print('ori_ro', ori_ro)\n", + " print('alpha' , alpha_pred)\n", + " print('rel_az', rel_az)\n", + " print('rel_el', rel_el)\n", + " print('rel_ro', rel_ro)\n", + " \n", + " # return pose_enc\n", + " \n", + " return ori_az, ori_el, ori_ro, alpha_pred, rel_az, rel_el, rel_ro, pose_enc\n", + "\n", + "@torch.no_grad()\n", + "def ori_single(ref_name, remove_bkg = True, softmax=True):\n", + " ref_img = Image.open(ref_name)\n", + " if remove_bkg:\n", + " ref_img = remove_background(ref_img, rembg_session, force=True)\n", + "\n", + " batch_img_inputs = load_and_preprocess_images([ref_img], mode=\"pad\")\n", + " \n", + " batch_img_inputs = batch_img_inputs.unsqueeze(0).to(device)\n", + " # print(batch_img_inputs.shape)\n", + " B, S, C, H, W = batch_img_inputs.shape\n", + " pose_enc = model(batch_img_inputs) # (B, S, D) S = 1\n", + "\n", + " pose_enc = pose_enc.view(B*S, -1)\n", + " 
gaus_az_pred = pose_enc[:, 0:360]\n", + " gaus_el_pred = pose_enc[:, 360:360+180]\n", + " gaus_ro_pred = pose_enc[:, 360+180:360+180+360]\n", + " \n", + " \n", + " if softmax:\n", + " gaus_az_pred = F.relu(gaus_az_pred)\n", + " gaus_el_pred = F.relu(gaus_el_pred)\n", + " gaus_ro_pred = F.relu(gaus_ro_pred)\n", + " gaus_az_pred = F.softmax(gaus_az_pred)\n", + " gaus_el_pred = F.softmax(gaus_el_pred)\n", + " gaus_ro_pred = F.softmax(gaus_ro_pred)\n", + "\n", + " angle_az_pred = (torch.argmax(gaus_az_pred, dim=-1)).cpu().float().numpy()\n", + " angle_el_pred = (torch.argmax(gaus_el_pred, dim=-1) - 90).cpu().float().numpy()\n", + " angle_ro_pred = (torch.argmax(gaus_ro_pred, dim=-1) - 180).cpu().float().numpy()\n", + "\n", + " alpha_pred = val_fit_alpha(distribute = F.sigmoid(gaus_az_pred).cpu().float().numpy())\n", + " \n", + " print('ori_az', angle_az_pred)\n", + " print('ori_el', angle_el_pred)\n", + " print('ori_ro', angle_ro_pred)\n", + " print('alpha' , alpha_pred)\n", + "\n", + " # return pose_enc\n", + " return angle_az_pred, angle_el_pred, angle_ro_pred, alpha_pred, pose_enc" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "def vis_distribution(image_paths, dists, titles=None, save_path=None):\n", + " dists = dists.cpu()\n", + " n_samples = len(image_paths)\n", + " \n", + " # 创建子图:每行 2 列(img + plot),高度自适应\n", + " fig, axes = plt.subplots(n_samples, 2, figsize=(10, 3 * n_samples))\n", + " if n_samples == 1:\n", + " axes = [axes] # 统一维度\n", + "\n", + " x = np.arange(360) # 0 到 359\n", + "\n", + " for row in range(n_samples):\n", + " img_path = image_paths[row]\n", + " ax_img = axes[row][0] if n_samples > 1 else axes[0][0]\n", + " ax_plot = axes[row][1] if n_samples > 1 else axes[0][1]\n", + "\n", + " # --- 显示图像 ---\n", + " img = plt.imread(img_path)\n", + " ax_img.imshow(img)\n", + " ax_img.set_title(titles[row] if titles else f\"Image {row+1}\")\n", + " ax_img.axis('on') # 保留坐标轴(显示刻度和边框)\n", + " ax_img.set_xticks([])\n", + " ax_img.set_yticks([])\n", + " # 可选:保留边框\n", + " for spine in ax_img.spines.values():\n", + " spine.set_linewidth(1.5)\n", + " spine.set_color('black')\n", + "\n", + " # --- 显示分布 ---\n", + " ref_dis = dists[row][:360].float().numpy()\n", + " ax_plot.plot(x, ref_dis, color='blue', linewidth=1.5)\n", + "\n", + " ax_plot.set_title(f\"Azimuth {row+1}\")\n", + " ax_plot.set_xlabel(\"Angle (degrees)\")\n", + " ax_plot.set_ylabel(\"Value\")\n", + " ax_plot.grid(True, alpha=0.3)\n", + "\n", + " plt.tight_layout()\n", + " if save_path != None:\n", + " plt.savefig(save_path, format='jpg', dpi=300, bbox_inches='tight')\n", + " plt.show()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1.0 6.335738976087225 0.3758221538851055 0.46655786181987247\n", + "ori_az [349.]\n", + "ori_el [11.]\n", + "ori_ro [0.]\n", + "alpha tensor([0.])\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA60AAAEiCAYAAAAS3DT7AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjcsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvTLEjVAAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs3Xm8JVV56P3fGmrYw5l6BhqaSRAQQSXiEITEgdw4BG9AiSFBFMXkBY1o9KMhEtSoUaN4jYLGiBE7FydQQ6IoiMMFjQoaEYKgMjY9nXFPNa3h/aPOOd2nB+hGoAfWt7s+u3btqtqratc5p5691nqW8N57giAIgiAIgiAIgmA3JHd1AYIgCIIgCIIgCIJge0LQGgRBEARBEARBEOy2QtAaBEEQBEEQBEEQ7LZC0BoEQRAEQRAEQRDstkLQGgRBEARBEARBEOy2QtAaBEEQBEEQBEEQ7LZC0BoEQRAEQRAEQRDstkLQGgRBEARBEARBEOy2QtAaBEEQBEEQBEEQ7LZ2WdD6d3/3dwghHnK9z3zmMwgh5qfx8fHHoHQP37HHHjtf1he96EWP2H5POukkTjrppEdsf7vDef3Zz362oAxf+tKXHvMyPJSTTjrpUfk8v/KVryw49p/85CePyH7nPte77777YW/7SJXl8ajX67Fs2TJWr149v2xXXEPPeMYzeMtb3vKIvVcQBMFj4Tvf+Q5CCL7zne/s6qIsMHfPurvfgwbB3myPqWn98Ic/zOWXX87Q0ND8squuuoqTTz6ZfffdlyRJWLlyJaeeeiq/+MUvHnRfv/71r0nTdLs36N/61rf43d/9XZrNJmNjY5x66qk7HAS85z3v4fLLL2fJkiU7dXy7yrbO6y9/+Uve+MY38qxnPWv+PG3v+D//+c9zxhln8IQnPAEhxE4F1qtWreLyyy/n7W9/+295FDvu4x//OJ/5zGd2apsnPvGJXH755bz5zW/e6rWvfe1rPPWpTyVNUw444AAuvPBCjDEPuc/jjjuOyy+/nNe+9rU7VZa9xXve8x6+8pWv7OpiPOI+8pGPMDQ0xOmnn75g+WN9Db31rW/lYx/7GOvWrXv4BxMEQbATPv7xjyOE4Pjjj9/VRXnYHq2/TTtzXxUEwbbtsqD1ggsuIMuyHV7/lFNO4YwzziBJkvllt9xyC2NjY7zhDW/g4x//OH/xF3/BT3/6U57+9Kfz3//939vd1xvf+Ea01tt87eqrr+YP/uAPKIqC973vfbzpTW/iu9/9Lr/7u7/Lxo0bH7Kcf/iHf8gZZ5xBq9Xa4WPblbZ1Xn/wgx/wf/7P/6Hb7XLEEUc86PaXXHIJX/3qV9l///0ZGxvbqfceGxvjjDPO4PnPf/7DKvvD8XCC1uXLl3PGGWdsFZB//etf55RTTmF0dJSPfvSjnHLKKbz73e/mvPPOe8h9rly5kjPOOINnPvOZO1WWvcXeGLRWVcVHPvIRzj77bJRSC157rK+hP/qjP2J4eJiPf/zjv9UxBUEQ7KjVq1dz4IEH8qMf/Yhf/epXD2sfz3nOc8iyjOc85zmPcOl2zKP1t2ln7quCINi2bUduj8Uba73dwHFHveMd79hq2dlnn83KlSu55JJLuPTSS7d6/ZprruGaa67hLW95C+9+97u3ev2tb30rBx98MDfccANxHAPw4he/mKc+9am8733v4x//8R9/qzLvCV7ykpcwPT3N0NAQH/zgB/nZz3623XUvv/xy9ttvP6SUPOlJT3rsCjmr3+/vsi8I3vzmN/PkJz+Zb37zm/PX8vDwMO95z3t4wxvewBOf+MRdUq7gseO9J89zGo0GV199NRs3buRlL3vZDm//aF1DUkpOPfVUPvvZz3LRRRftUFeMIAiCh+uuu+7ixhtv5Morr+Scc85h9erVXHjhhTu9HyklaZo+CiXctXbmvioIgm3bqZrWe+65h7/8y7/k8MMPp9FosHjxYk477bStmjhs3s9qy2lu3W31aR0fH+f2229nMBg87ANatmwZzWaT6enprV6rqoo3vOENvOENb+CQQw7Z6vXJyUluu+02XvrSl84HrADHHHMMRxxxBFdcccXDLtf2OOe4+OKLOeqoo0jTlOXLl3POOecwNTX1kNsWRcGFF17IoYceSpIk7L///rzlLW+hKIoF6+3seV20aNGC5sIPZv/990fKx6bCfu6aue2223jFK17B2NgYv/u7vwuAMYZ3vetdHHLIISRJwoEHHsjb3/72BefiwAMP5NZbb+W73/3u/PW4ec3Xr3/9a37961/vUFluu+02brvtNl772tcu+PLlL//yL/HeP+L9c3/+85/zyle+koMPPpg0TVmxYgWvetWrmJiYeMhtDzzwQF70ohfxzW9+k2OPPZY0TTnyyCO58sort7l+URScf/75LF26lFarxUtf+tKtWhl89atf5YUvfOF80/xDDjmEd73rXVhrF6w3GAy4/fbbF/QDEkLQ7/f513/91/nP4ZWvfOX862vWrOFVr3oVy5cvJ0kSjjrqKD796U8v2O9cv6cvfOEL/P3f/z0rV64kTVOe+9znbvUN/5133skf//Efs2LFCtI0ZeXKlZx++unMzMzMr7Mj18/m5/Kaa67huOOOo9Fo8IlPfAKo+5geeOCB2/zdsi2P9jX0/Oc/n3vuuSfcHAVB8KhbvXo1Y2NjvPCFL+TUU09d0K9/zub9+7ec5lpAbatP60knncSTnvQkfv7zn3PiiSfSbDY59NBD539Hfve73+X444+n0Whw+OGHc+211y5431e+8pUceOCBW5Vny/vQh/rbBDA9Pc0rX/lKRkdHGRkZ4ayzztqhe6udua8KgmDbdqqq88c//jE33ngjp59+OitXruTuu+/mkksu4aSTTuK2226j2WwCde3bli644AI2bNhAu93e7v7/6Z/+iYsuuojrr79+p/pGTk9PU1UV69at4+KLL6bT6fDc5z53q/UuvvhipqamuOCCC7Z5wz53g9poNLZ6rdlscuutt7Ju3TpWrFixw2V7KOeccw6f+cxnOOuss3j961/PXXfdxT/90z/x05/+lBtuuIEoira5nXOOl7zkJfy///f/eO1rX8sRRxzBLbfcwoc//GHuuOOOBc1bHu553V2ddtppPOEJT+A973kP3nugrmH/13/9V0499VTe9KY38V//9V+8973v5X/+53+46qqrgPrzP++882i32/zN3/wNUDfbnDN3zexIP5Of/vSnQN2vcHP77rsvK1eunH/9kfKtb32L3/zmN5x11lmsWLGCW2+9lU9+8pPceuut/PCHP3zImrQ777yTl7/85bzuda/jzDPP5LLLLuO0007jG9/4xlbNs8877zzGxsa48MILufvuu7n44os599xz+fznPz+/zmc+8xna7Tbnn38+7Xabb3/727zjHe+g0+nwgQ98YH69H/3oR/ze7/0eF154IX/3d38H1L8fzj77bJ7+9KfP98ecC/TWr1/PM57xDIQQnHvuuSxdupSvf/3rvPrVr6bT6fBXf/VXC8r6vve9Dyklb37zm5mZmeH9738/f/qnf8p//dd/AVCWJS
effDJFUXDeeeexYsUK1qxZw9VXX8309DQjIyPAjl0/c375y1/yJ3/yJ5xzzjm85jWv4fDDDwfgxhtv5KlPfeqOfJzAo38NPe1pTwPghhtu4ClPecpvta8gCIIHs3r1av73//7fxHHMn/zJn3DJJZfw4x//mN/5nd+ZX+dv/uZvOPvssxds97nPfY5rrrmGZcuWPej+p6ameNGLXsTpp5/OaaedxiWXXMLpp5/O6tWr+au/+ite97rX8YpXvIIPfOADnHrqqdx33307HSQ+2N+mOS972cs46KCDeO9738vNN9/Mpz71KZYtW8Y//MM/7NR7BUHwMPidMBgMtlr2gx/8wAP+s5/97Ha3e//737/VOhdeeKHf8u3nll1//fXzyy677DIP+Lvuumu7+z/88MM94AHfbrf9BRdc4K21C9ZZu3atHxoa8p/4xCcW7PfHP/7x/DrWWj86Ouqf+9znLth2fHzct1otD/if/OQn2y3H5latWuVf+MIXPug63//+9z3gV69evWD5N77xja2Wn3jiif7EE0+cf3755Zd7KaX//ve/v2DbSy+91AP+hhtumF/2cM+r995/4AMf2KH1vPf+qKOOWlDGHXX99dd7wH/xi1980PXmjuNP/uRPFiz/2c9+5gF/9tlnL1j+5je/2QP+29/+9g6VcdWqVX7VqlULlm153ufMnZd77713q9d+53d+xz/jGc940GOZs63rcFu29bP3f//v//WA/973vrfV/jb/vFatWuUB/+Uvf3l+2czMjN9nn338U57ylK22fd7znuedc/PL3/jGN3qllJ+enn7Q8pxzzjm+2Wz6PM/nl819thdeeOGCdVutlj/zzDO32serX/1qv88++/jx8fEFy08//XQ/MjIy/75z+z3iiCN8URTz633kIx/xgL/lllu8997/9Kc/fchra2eun7lz+Y1vfGPBulVVeSGEf9Ob3rTV/nflNRTHsf+Lv/iLHdpPEATBw/GTn/zEA/5b3/qW995755xfuXKlf8Mb3vCg291www0+iiL/qle9an7Z3O/2ze9XTjzxRA/4f/u3f5tfdvvtt3vASyn9D3/4w/nl11xzjQf8ZZddNr/szDPP3Opvu/fbvg/d3t+muXU3L6v33r/0pS/1ixcvftDj3NLO3FcFQbDJTrXr3LwGsqoqJiYmOPTQQxkdHeXmm2/e5jbXX389b3vb2zjvvPP4sz/7swfd/9/93d/hvd/p2sDLLruMb3zjG3z84x/niCOOIMuyrZopzvVV3fJbvs1JKTnnnHO47rrreNvb3sadd97JTTfdxMte9jLKsgTYqeRRD+WLX/wiIyMjPP/5z2d8fHx+etrTnka73eb6669/0G2POOIInvjEJy7Y9vd///cBFmz7cM/r7up1r3vdguf/+Z//CcD555+/YPmb3vQmAP7jP/5jh/Z7991373A2v7nrYPMEVnPSNH1ErxNY+LOX5znj4+M84xnPANjuz97m9t13X1760pfOPx8eHubP//zP+elPf7pVhtnXvva1C2puTzjhBKy13HPPPdssT7fbZXx8nBNOOGG+OfCck046Ce/9fC3rg/He8+Uvf5kXv/jFeO8XXNcnn3wyMzMzWx3rWWedtaAp/wknnADAb37zG4D5mtRrrrlmu024dvb6Oeiggzj55JMXLJucnMR7v1PJyB6La2hsbCwM0RAEwaNq9erVLF++nN/7vd8D6ma2L3/5y7niiiu2uhebs27dOk499VSOPfbYHUoY1263F2RlP/zwwxkdHeWII45YkK14bn7ub8Ajbcv7jxNOOIGJiQk6nc6j8n5BEGyyU82Dsyzjve99L5dddhlr1qyZb5oJLOgfNuf+++/n5S9/Oc9+9rP50Ic+9NuXdjs2z555+umnz2dm++AHPwjAD3/4Qy6//HKuu+66h+x/+c53vpPx8XHe//738773vQ+AF7zgBbz61a/m0ksvfdDmzTvrzjvvZGZmZrvNYjZs2PCg2/7P//wPS5cu3elt93QHHXTQguf33HMPUkoOPfTQBctXrFjB6OjogmDrkTIXtG3Z5xGYT8zzSJqcnOSiiy7iiiuu2Oqz3dbP3pYOPfTQrZoQH3bYYUAdrG/e5P2AAw5YsN5cILZ5P+tbb72VCy64gG9/+9tb/bHekfJsy8aNG5menuaTn/wkn/zkJ7e5zpbH/lBlPeiggzj//PP50Ic+xOrVqznhhBN4yUtewhlnnDEf0O7s9bPl9be5zX8nPpTH4hry3ockTEEQPGqstVxxxRX83u/9Hnfdddf88uOPP55//Md/5LrrruMFL3jBgm2MMbzsZS/DWsuVV165zS/utrRy5cqtfpeNjIyw//77b7UM2KG8IA/Hg/3NGR4eflTeMwiC2k4Freeddx6XXXYZf/VXf8Uzn/lMRkZGEEJw+umn45xbsG5Zlpx66qkkScIXvvCF3zpT8I4aGxvj93//91m9evV80PqWt7yFE044gYMOOmi+Jm2u9mHt2rXce++987+I4jjmU5/6FH//93/PHXfcwfLlyznssMN4xStesc0b29+Gc45ly5ZtM2EBsN2AdG7bo48+ertfBmz5i3xvsr2b+cfy5nyfffYB6utny3O9du1anv70pz+i7/eyl72MG2+8kb/+67/m2GOPpd1u45zjD/7gD7b62fttbTlcy5y5gGx6epoTTzyR4eFh3vnOd3LIIYeQpik333wzb33rWx92eea2O+OMMzjzzDO3uc6Tn/zknSorwD/+4z/yyle+kq9+9at885vf5PWvfz3vfe97+eEPf8jKlSvn19vR62db19+iRYsQQuzUjdJjcQ1NT0/vMWNGB0Gw5/n2t7/N2rVrueKKK7aZrHL16tVbBa1//dd/zQ9+8AOuvfbaBb+DH8z2ftfvyN+A7f1u314t8MMpx858YRkEwcOzU5Hkl770Jc4888wFw77keb7NTL2vf/3r+dnPfsb3vve9BcluHgtZli2o7bn33nu55557tllD8pKXvISRkZGtjmH58uXz5bbW8p3vfIfjjz/+Ea1pPeSQQ7j22mt59rOfvdO1Kocccgj//d//zXOf+9zHfU3KqlWrcM5x5513Lhj/bP369UxPT7Nq1ar5ZY/UuTr22GMB+MlPfrIguHjggQe4//7755M4PBKmpqa47rrruOiiixYM83TnnXfu8D5+9atfbVXrdscddwBsM6vig/nOd77DxMQEV1555YKx9Db/lv2hbOtzWLp0KUNDQ1hred7znrdTZXooRx99NEcffTQXXHABN954I89+9rO59NJLefe7371T18/2aK055JBDduocPNrX0Jo1ayjLMowJGATBo2b16tUsW7aMj33sY1u9duWVV3LVVVdx6aWXzt/jXHHFFVx88cVcfPHFnHjiiY9JGcfGxrZ5n7qtVliP9/upINid7VSfVqXUVt8mffSjH93q26rLLruMT3ziE3zsYx/bqdqCnR2aZVtNYO+++26uu+66BRk5P/nJT3LVVVctmM477zygbkK8vZrOOR/84AdZu3btfB+3OTszRMq2zDWPede73rXVa8aYb
f6S3XzbNWvW8M///M9bvZZlGf1+f/75IzGU0G9rW0OfPFL+8A//EKizA29urhb6hS984fyyVqu13fO6M5/nUUcdxROf+EQ++clPLrj+L7nkEoQQnHrqqfPLZmZmuP322x92s9m5b3a3/Nnb8ngfzAMPPLAgC26n0+Gzn/0sxx577E5nw95Wecqy3Ga/pO197tv6HJRS/PEf/zFf/vKX+cUvfrHVvrYcdmdHdDodjDELlh199NFIKeeb5e7M9fNgnvnMZ/KTn/xkh8v2aF9DN910EwDPetazdnibIAiCHZVlGVdeeSUvetGLOPXUU7eazj33XLrdLl/72tcA+MUvfsHZZ5/NGWecwRve8IbHrJyHHHIIMzMz/PznP59ftnbt2q0yw8OD3yMEQbBr7VRN64te9CIuv/xyRkZGOPLII+ebdyxevHh+nfHxcf7yL/+SI488kiRJ+NznPrdgHy996UtptVrb3P/ODs1y9NFH89znPpdjjz2WsbEx7rzzTv7lX/6Fqqrm+6MCWzVNAeZ/KZ144okLAtzPfe5zfPnLX+Y5z3kO7Xaba6+9li984QucffbZ/PEf//GCfezMECnbcuKJJ3LOOefw3ve+l5/97Ge84AUvIIoi7rzzTr74xS/ykY98ZMGN6+b+7M/+jC984Qu87nWv4/rrr+fZz3421lpuv/12vvCFL8yPIwk7f15nZmb46Ec/CtTDZcztY3R0lNHRUc4999z5db/3ve/xve99D6iDin6/z7vf/W4AnvOc58zXxG1r6JNHyjHHHMOZZ57JJz/5yfmmqz/60Y/413/9V0455ZT55BBQDwNyySWX8O53v5tDDz2UZcuWzSev2tnP8wMf+AAveclLeMELXsDpp5/OL37xC/7pn/6Js88+e0Ht1lVXXcVZZ53FZZddttWYbztieHiY5zznObz//e+nqir2228/vvnNb+5Urd5hhx3Gq1/9an784x+zfPlyPv3pT7N+/Xouu+yynS7Ps571LMbGxjjzzDN5/etfjxCCyy+/fJvNo7b3uT/taU/j2muv5UMf+hD77rsvBx10EMcffzzve9/7uP766zn++ON5zWtew5FHHsnk5CQ333wz1157LZOTkztV1m9/+9uce+65nHbaaRx22GEYY7j88svnA2TYuevnwfzRH/0Rl19+OXfcccd8f+GH8mheQ9/61rc44IADwnA3QRA8Kr72ta/R7XZ5yUtess3Xn/GMZ7B06VJWr17Ny1/+cs466yygvjfY8t7wWc96FgcffPCjUs7TTz+dt771rbz0pS/l9a9/PYPBgEsuuYTDDjtsq+R+2/vb9NvamfuqIAi2baeC1o985CMopVi9ejV5nvPsZz+ba6+9dkEmzV6vR57n3HbbbdvMFnzXXXdtN2jdWX/xF3/Bf/zHf/CNb3yDbrfLsmXLeMELXsDb3/52jj766Ie1z8MOO4zJyUne9a53kWUZhx9+OJdeeukj2txzc5deeilPe9rT+MQnPsHb3/52tNYceOCBnHHGGTz72c/e7nZSSr7yla/w4Q9/mM9+9rNcddVVNJtNDj74YN7whjfs8E3ztkxNTfG3f/u3C5bNNQlftWrVgl+u3/72t7nooosWrDu37YUXXrig+eij6VOf+hQHH3wwn/nMZ7jqqqtYsWIFb3vb27jwwgsXrPeOd7yDe+65h/e///10u11OPPHE+aB1Z73oRS/iyiuv5KKLLuK8885j6dKlvP3tb1/QhPeR8m//9m+cd955fOxjH8N7zwte8AK+/vWvs+++++7Q9k94whP46Ec/yl//9V/zy1/+koMOOojPf/7zW2XB3RGLFy/m6quv5k1vehMXXHABY2NjnHHGGTz3uc/d4f196EMf4rWvfS0XXHABWZZx5plncvzxx7N8+XJ+9KMf8c53vpMrr7ySj3/84yxevJijjjrqYY2Dd8wxx3DyySfz7//+76xZs4Zms8kxxxzD17/+9fnsy7Dj18+DefGLX8ySJUv4whe+wAUXXLBD2zxa15Bzji9/+cu8+tWvDs3dgiB4VKxevZo0Tbca63uOlJIXvvCFrF69momJifkvtrd1P3XZZZc9akHr4sWLueqqqzj//PN5y1veMj/O6p133rlV0Lq9v02/rZ25rwqCYNuE3817j3/mM5/hrLPO4uabb2b//fdn8eLFu/VN2PT0NMYYnvrUp/LkJz+Zq6++elcXaZt2h/NqrWVqaoobbriBU045hS9+8YvbrVneVU466SSqquKrX/0qcRw/YtkBy7Kk0+lwxRVXcN555/HjH/94QY3/I+nAAw/kSU960m57Le5N3vWud3HZZZdx5513zjej3hXX0Fe+8hVe8YpX8Otf/3o+4VMQBEEQBMGeaqf6tO5KT33qU1m6dCkTExO7uigP6qSTTmLp0qXcd999u7ooO2RXntdbbrmFpUuXcsoppzzm770zbrzxRpYuXcorXvGKR2yf//mf/8nSpUvn+1YHe4c3vvGN9Hq9rbJoPtbX0D/8wz9w7rnnhoA1CIIgCIK9wm5f07p27VpuvfXW+ecnnngiURTtwhI9uP/6r/+i2+0CdTbUY445ZheXaNt2h/Pa6/X44Q9/OP/8yU9+8nbHrN1VbrrppvlhTB7Jz3Pjxo3893//9/zz448/nqGhoUdk31sKNa271t5wDQVBEARBEOxKu33QGgTBbycErUEQBEEQBMGeLAStQRAEQRAEQRAEwW5rj+nTGgRBEARBEARBEDz+7NSQN3sb5xzj4+MANJvN3TorcRAEjx7vPYPBAIAlS5YgZfg+LwiCIAiCYHfxuA5ax8fHWb58+a4uRhAEu5H169fvdgnBgt2Tc44HHniAoaGh8KVnEARBEOwk7z3dbpd99933ISsMHtdBaxAEQRA8XA888AD777//ri5GEARBEOzR7rvvPlauXPmg6zyug9Zmszk/v379elqt1i4sTRAEu0q/359vdbH574UgeDBzQwzdd999DA8PPyL7dM6xceNGli5dutc3Uw/HuncKx7p3Cse6d9rVx9rpdNh///13aMi+x3XQunlzrlarFYLWIAhCM89gh81dK8PDw49o0JrnOcPDw4+Lm6VwrHufcKx7p3Cse6fd5Vh35N5r7/4kgiAIgiAIgiAIgj1aCFqDIAiCIAiCIAiC3VYIWoMgCIIgCIIgCILdVghagyAIgiAIgiAIgt1WCFqDIAiCIAiCIAiC3VYIWoMgCIIgCIIgCPYyv/41XH/9tl974AG49trHtjy/jRC0BkEQBEEQBEEQ7EWmpuC44+D3fx9uumnr1489Fk4+WXLDDfFjXraHIwStQRAEQRAEQRAEe5ELLoDp6Xr+y19e+NqaNbBxYz3/3e+GoDUIgiAIgiAIgiB4DPV68IlPbHp+9dULX9/8+czMnhEO7hmlDIIgCIIgCIIgCB7S2rVgbT0vJdxyC9xzT/3885+H171u07q/+Y0C4Etfgttue4wLuhP0ri5AEARBEARBEARB8MjYsKF+PPhg2G8/+P734d//HVatgtNPX7jur3+tue46OO00OPRQuOMOEOKxL/ND
CTWtQRAEQRAEQRAEe4m5oHXZMnjxi+v5L34RXv/6ev6AA+B976vn165VfO5zdZT6q1/BL37xGBd2B4WgNQDAb2Pa7gtbrbQz7+I2myx+weTw+K3ewm0xPejbP1R5d2R6FO0GRQiCIAiCIAj2YtsKWr/3Pbj7bli5Em69Fd76VliypL7r/OxnN1Wt/vu/P8aF3UEhaA3mg6Q6jNwsMPRzrzrwW0xza/nNgq3tRF/1rJ8NSt1sgGrwVAsmh8HhZtdgfjJbTHNldLPvOf++W0a0W0a7OzL9tlHjNs7BQwXhmx/rI1GEIAiCIAiC4PFr86D18MPrZr9zPvxhaLfr+cMP33rbf//3um/re94D/f6jX9YdFYLWAKiDpLnAaaugaVsRlN/+S9t/hy3Dtvpd/Rah6FyN66a5rWtf5+PpbZRxm0Hf9gLbx7h6c8sib68mOQiCIAiCIAgejs2DViHgpS+tn7/gBfDHf7xpvaOP3jR//PH1uj/8YT2269/8TV0bu7sIiZiCh0ngEXix+ZJt8PVyj6D+jsRv1rvbz77iEAjEbCTqZwPYet9iNoib26ZeTyAQMLsN85Gek3XgJ3z9dnOv12+5ZTjoZ/dfP9b/Z/f8KEWOYrNSzB3Rlt8c7YZ934MgCIIgCII9xFzQunx5/fi3f1v3Y33FKxYmWfrbv/WMjfVoNFqceabkoovg05+G9evr1y+5BF71KnjqUx/b8m9LqGkN5gm2FTAJEJL6UpmdhAQhtqolrCPEbVdf1qGgRKAQXs9O0ewUzz4qhAfpPdI7lHco79Heo3FoX08Kh/Qe4fym9sGufj+HwwiLkfWjlRYnLX6ryeClwUmDkxYrDVZYvPjt6zq92DTNxsJzIT4Cj8QjcfOPaotJzIb4QRDsnI997GMceOCBpGnK8ccfz49+9KPtrvuZz3wGIcSCKU3Tx7C0QRAEQfDo2LymFWBoCM49FxYtWrjeihXw+tf3+Zu/2ZScaWysfu3ww8E5+P/+v/pxVwtBazBvri50/nE7kdN8sCq2DE89Xrg68BPbuLq30UJYOIFwsq4i9Wq2qlTMTrPzm0eAzD3f3jHMBYdzwZ9FbNZjVGzVebVuhDy3bNsNkXd88sIv2M+mf/X7iPnJIrxDeLvF5BB+0xkNgmDHfP7zn+f888/nwgsv5Oabb+aYY47h5JNPZsPcX+5tGB4eZu3atfPTPXOD2AVBEATBHmzLoHVHLV0K118PV10F111X93394Q/hqKPq8V13pRC0BsCmWtbt1vBtES9uHrAu7I+5WR/U7cWX24r35qNgRd1qXQOb5v38pPBC4YXEiy1qf5GzdblsNkmk3zQJv/lzhWTzqa4PrsvtH9a0ecqpLf8tDIqZPejNTsqCrFJBEOyMD33oQ7zmNa/hrLPO4sgjj+TSSy+l2Wzy6U9/ervbCCFYsWLF/LR8rh1VEARBEOzBHm7QCnDMMXDKKfX4rhddVC+7/XY488xdm5gp9GkNZvtZ+s2+wZgNXbcTO/ntvOxgrqcpsOkbkW3141y4wy1Ls1lfWbHZ+4lNYd78vjfvHguAWtDfdb4r65aFnSvmXEIpseULm17e2ce5gNXWIfB8eT0SgdvsXGx+Zub2sNUJCYLgIZRlyU033cTb3va2+WVSSp73vOfxgx/8YLvb9Xo9Vq1ahXOOpz71qbznPe/hqKOO2u76RVFQFMX8806nA4BzDvcItZ1yzuG9f8T2tzsLx7p3Cse6dwrHunu75hq4+mrBBRd4Fi+GiYn6fnbJEvegTXsf6lhf//q69vXP/1xSFHDNNY5TTnnkyr0z5zgErQEwFy5tFlpuETftSBjlkAuq7ucDTLFZsOrnMiOx7UhWbLbybJA6F6wuqKMUCzfdPEAG+WDdaxfGhqJ+Sz87v61UTTvzWNc4b+qVOhdAu9lnc2dI4PFCbNYUWG4WgYcerUGwM8bHx7HWblVTunz5cm6//fZtbnP44Yfz6U9/mic/+cnMzMzwwQ9+kGc961nceuutrFy5cpvbvPe97+Wiua+dN7Nx40byPP/tD4T6D/jExAzee6TcuxtDOeeYmQnHurcJx7p3Cse6ezv//MXcfnvEV77i+OAHZ/B+EUJ4rN3Ag/SS2aFjff7z4dWvHuJf/qXFl76U86xndR6xcne73R1eNwStwUI7GKx6z2z/UJjPNuSZzQM8GwjC1v0zha/7u3o2JW6aC2S9ADTM15DWtb9Ogpyvs2T2vQRbhnjCeYSs37jetZ/LELWpnbJ3deGkqOPE2W0ls7mcNuvI65k7Jl8Xcza78JY1zXPrzfXrdd6DEHV4KhaG1X4uiPUe4akD17kDng/i50LdIAgeLc985jN55jOfOf/8Wc96FkcccQSf+MQneNe73rXNbd72trdx/vnnzz/vdDrsv//+LF26lOHh4d+6TGvWwJvfDMaM8ad/GnP11ZIPfMDPJ8XY2zjnEEKwdOnSPebG8OEKx7p3Cse6d9rTjrUs4Ve/qu8bH3hA8YpX1NmWliyBffZ58PbBO3qsp50G//Iv8O1vN1iyJOWROi07kwAxBK1BbSdapIrZINMaA8KhlQQvsV7WdYazQ9XMJUTy3uC9QQiPEA78XGIki3cW5wzOGLwBiBCz6aCEd3UAHCmEjhBCgPMIrUDqOiC1HqSq49KiBCXr9Ty4ssIbCyrCzyZ0EmUBWkMUIZRCaIlQEqRAzaeh2tR22Htwtg5EpZQI6bfbEXwuRrbGILRCC7kpIJ/dI0LgvMc6j7f1PpWSs69tWm/zxsJBEDy4JUuWoJRi/VyO/lnr169nxYoVO7SPKIp4ylOewq9+9avtrpMkCUmSbLVcSvmI3NisXw9f/KLH+wb/+Z+ePBcoJfjnf/6td73bEkI8YudvdxeOde8UjnXvtLsfa6cD55wDa9fCYYeBMXWG4Gc+E775zXqdJz5RIOVD303uyLH+3u/V+1+/XnDTTYLjj39kjmNnzm8IWoOd5qxFK0EUMRvsGpwHUymk1CgtZwPOueC0Al/gTIG1BX52wpXYIiPrdeh2Zsi7A1xWEcsYFcdESYyOY6IkQqoIJRXee6J2C5pDdcBa2ToIlbKuCu31kVGEzwuy6RnKfoErDa6y4Gd/IJVGxpqo0SBut1BDbWS7iYhSpE4RuoFQejZIdghr0FKi5n7w55vf+80e6nkFtKLZWlVvZ9vqz77vbDvkyliUVIhI1kmbZgPbuQbFC5pTB0HwkOI45mlPexrXXXcdp8x2tnHOcd1113Huuefu0D6stdxyyy384R/+4aNY0gd33HHw2tfCJz4BeV7/FvjUp+A1r4GnP32XFSsIgiDYzfzHf8AVV9Tz3/1u/fikJ9XLv/UtmJ6uA81HShzDxRfD/vvDU57yyO13Z4SgNXhQ890/N6sx1MoDBnwFrqJOMSRJowS8wLsS70qcM3hf1etRMRhfy2ByHUU2Q5F1sVWGcwWuqrBlgc9zfLdP4TYNRFMPwepxzuMF9Dt9kiRBCYWrDMI6pJQ4YzHG4Z1mbHQxJi/oT3eQXtQBq/E00gZ5XuCERMYRPorwWqEaCe2xUZqLltBcsYr
WfgeTjC4mGWojlUar2QO3Jd77Bedimyx1E1+vUBLwsl5W92Ql0jHIuha3Mh5jHQiJ0gKl6mbDYnvjDQVBsE3nn38+Z555JscddxxPf/rTufjii+n3+5x11lkA/Pmf/zn77bcf733vewF45zvfyTOe8QwOPfRQpqen+cAHPsA999zD2WefvSsPg3e/23PllY7paclxxwl+8IP6xiQErUEQBMGcBx7YetmTnlTX4/yv//XovOerXvXo7HdHhaA12MJsNf02Ehc545C6rkHEl4gqA1eAliAVrpoN6rodutMT9GbGyfsdbJUhXI4p+rhqAL5A+gqJQbgCm2eU/T7kA3ReYIucvCgpqhJrLUiJEhqtNfsuXoQvu/S7PWxe0kxbCG+ZnpxhMNOnkQ4RDZZDVsDEJEpoZOWwZUXUGma0NUTlPNl0RSlBxBGqmWKn1zJxf8y9d96BX7KSRfsewP77r2RsyRJkktSBqjHYqqI3s6kDuhB1Jqe5IFMIwdDoaN1kOYoQUYLQMegIIRVCKCizeugbGRHriDhSm0b9cWwakjYIgh328pe/nI0bN/KOd7yDdevWceyxx/KNb3xjPjnTvffeu6AZ0tTUFK95zWtYt24dY2NjPO1pT+PGG2/kyCOP3FWHANQDv19zzQRpuoQbbqiD1ltv3aVFCoIgCHYzW/SGAeDoox/7cjyWQtAazHqQSGk+4a/DFjnSFYjIQ+TrLLnVAAYD1t/xa8penzzrk2VdbJGBq1DCEAmHlhZhS3yV4U3dRNhWOWW/S97r4vMBDWugKsE4lLNoIdBaE+kY7TXmgS5axciZDq6saAyPIr2n7M0gi4IRKRjNYvozA8qpCSKhsXlJPsjxSYuq0SBKG6RJTCUF2cBie4p0qM3o4iXEi9qofcZwicVsvI8H1v6GbJAxyAZUZYXH02q25k/KViPpeFBSItHoOCZptRkeHaO9ZDksWgxJA6kUqAgpHWDACSrr8UKilQrNg4PgYTr33HO32xz4O9/5zoLnH/7wh/nwhz/8GJRq5+2zj2PZMpiZqZ//6Edwxhlwwgl1H6YgCILg8W3duvrxJS+Br32tnj/iiF1XnsdCCFqDHSI8CCWRKqJuvFvgiwG2s5H+zCRlfwY5mKRhC2JZksoBBT3yvEs56FPkGSOtlEhB3u+SD7qYIsOZEluVUBaIqsBagxKeSAgiZF3DWZb4oqTy0IhiFo+1GRkapugPaDKb+UxpkiUtysJClSNcRqI8TQVSpVRqtjbUlmir0CiEkDjlEYlibLTN2H7LcaMNuvk0RdGlrAxFWWDKilgKhqKIKI5ppr4OVp3Heoe3rs4w7OrxWRMRU1UZ5cDR2biWCQ9ORxCl2DjiiCc/hcbSFcjRUdAxwgqwFq3j2YzDipCKKQiCuUrf6WlYvbqeQtAaBEEQzAWtp5xSt8aZmqrzIuzNQtC6F9qq9m9HN9hqkNItx7+xdU9TU1JNb2Biwz1MT6yjHHTRpo8ajJPYEikcOIcuC5IyQ1UDjMnprx+nbw0aRyoEWgi8UOSVo9/PyfMMKzxCCJRSKCXRWiOFREqB8JKqtEhilABvcioPJqso+gYlU+TwMD1nGSiomhFCRCQyxkURWT9n35X7Migr1s5MQ5KwZP+VrFh1AI2RNrlz9LqTVDpBCIn2IJwl8Q4pFInwJErSVAZXp1CisgbnLcZZnLHgPL3peswp5yVaSISMsFZjTI4vNTdc+3WGli5nv4OfwD6rDiZavASdxAjhQdjZz0HND62z5QcjFjzdbIieLT7oEPIGwZ5teBhWrYJ77tm0bHKybkIcBEEQPH7NBa377lu3xqkqGB3dpUV61IWgdS/xYKmBth3E+oWv+3qQmk2L5wYedXjvgAoGM+QzG5jacB/dyfWU2RTeZAhbIdyAuOiiTIavKqqqwJQFVZFjigJnKmxRYouSvKyTL+F8nZrIGKrS4JWi0WpjnMUYUweoxhNFmkYUo5SiKCrysiDv5+R5SdRugG5gXUlRwVCjTVb08QnoWBInilajgbcePw0bXQcjJXJRi6Q9RLQooUoLiD1WCYRRSOMRMp7t/yZQMiVNUxrNNk5pOuvX45zDuoqqqrCVoaqKuv+tcygvKEtDUZTkpcN5hUwa6EYblTRY0RqmO76BOzduZN1v7mTlwYew9MADiUZGESoC3QKVbvoIBHjcwgbccy/MfqJzAaudXbp7JmgPgmBnHX74wqD1jjvgGc/YdeUJgiAIdr25Pq0rVjx+vsgMQeteyLHtsT63qnnzbrN5Ua/v2TSSi6+HqqG7Ed/ZyMTaexCmS1QOWGRzkBmF6zHodaj6HUzeBVNibYm1Fd4ZlPdE0iMTSQmUeHJrGfT7GGNopk2GRodpCklRebxIiHWELwf0uzNI70jaEUJLiiJDJoLxmXGG0iHSkTalEyzbdz8GBsaWLqXT6yOEwZGRjjiSEYONcpRMSIah16kY9EDKlOElKY2lEa49Qz/qQSIZDFL62RBDwysYajWJoxQtI2wFU90OWX+Atw5jDGWRk+cDqiLDVMVstmSPQiCFYLaBMzbPKDpdCj+OlzHomKjdZqTdxhR97tnwAFO/+R8OfMKhtA49DIZjcDEGgcGCrEeulaIORtXch+xFnZlYCFD1UzP7cUZz6wVBsEdzbuHzELQGQRA8vhkDGzfW8zs4FPleIQSte6lttfjdZnNR78FanIOydDSaKfVwNgX0J+ivv4fB1Fp8Pk1LgzMdTD5NlfexRUY56FH0OlT9Pv3pGWLv0ZGcHZLUUpqcohhQmZIkTYiiCJVI0qGEIoeKim7Zq/tz6hSkpPIGKyy6oZA4rM8xQiISVzfBleCSFFs44liRywzRAKstCIOgxIoBPvK4hsRFHkNGL3eUSQNBilYa1RLYqKSwM/R66yl6BVLtR9pegleCgSnJrSOSMVrGyDhC2Zh1960hSWOajYTRsRYKcLakKDNcZej3uwx6Gb2pSVxhSURCWzdQaPBQliU6y+raUAmZsEz3JrmrN8HQA/ez5LCn0Vp1JLrZBO/IqhKkJNXJ9j9kv9VsEAR7gXe8A669dtPzX/5y15UlCIIg2PU2bqxv36WEJUt2dWkeOyFo3QvN1bJuyQHSe5z3eOdml3ik8EhliHUffAdvC4qp9Wxcew/9ibVI06epLJ1BH1t0qLIOtswRth6HVccFygnyPMZlJZ1un6LIqEyOEwYlQUqB9jESh9CCpJkglCDPS/IqJxKeRqLQWlJUOdKXxFFd14hwOAVJFFHlBc57ZBpRuIpGS2K0QA2VkPTRrsLZelxYH4GMG4gErBU4ZVCRwbkCkBSVRmY5NiqwQiJ0Ax21iJIGcZyilEY4Ac5TmYIyr6iKgtGxIbQUxJFESwfeImaH8PFUDGamiYRixegIwitEJfAluBIsnjLPKE0JRUalFJUWiCpnxlf0Bzm9MmJZIRk96CCi4RapgNI7HBaJwiMQYraGdYvq9NA0OAj2LiecUPdjvewyeNOb6prWIAiC4PFrrj/r0qWgHkfN6kLQupfYPFCdi2G2Clw9OO9n25s56iFGJUJ4PBYpC6reeq
Y2rKMzvoEymyYWBc3EkypLaQqsz7DVgCLv4qsKW5WYqsKXDm9BSEHUaCITTWRiyiqnrDKKqkAajVN1H1mlJVEzwkqHKxxeOZwweCnQkcMqB94ghEFJj1BghcXHFeDxqcGUObrtsHaAHjK4uKTVbiAKi5pJEDGIJEanCiUgNVBIW+8P8EpgfIoHZKTQSYO40SZNW0RRA4nEVHX/WldainxAlQ9I4whnDFlZ4suibg7tKpypkM7TADQSURlcXlJlBukUjbhFkjTIpjqUWUHle9hIQxqjhxvoSCNVzvS995APHCbrs+QJhxAtXkQkFZY6b7NAoeRsD9e5oFXMDrcjQuAaBHubsTF44hPr+S99qR5A/vvfr5cHQRAEjy+b92d9PAlB617DzQaroq6Jm10634LUzzYlQCCkoO4lWSdCAgs2o5y6j/7MGnqTG/Fln4awxNoiTUGZD0ikQEiFVRpUgvMKJzRKVDgs1niEFHWw6SReCpR0KGnroFR5CltQ2YpIaBqNBkmUIFKBdx4vHMZVxLFGOEWRG4RwxEmEkJ6s7KMigZYSqx00gCaYykACXkOrPYTINF2ZIxoGn8SIVBDHGq8VXnWIjCOONEnqEZGhwlN6S2kLtPNIFWEdlFWJKSuwDi0EzVTjVFonj8JhbUVpc4pBhyLvU+YDfOlY0liM6fUpejnOeGKZoEWMyXuUto/OK7RxGGtwSkBVYo2hGBT4aEAV9Sk6ObIqEaZg8RMOJVq6HBkleC8wvh7TVUhQ82Po1p/14+gLtyB4XDn88E3zt95aNxk+7bRdV54gCIJg15iraQ1Ba7CH21Tfunnt6/xSMVcx58FbsAZMgS1nmFhzD95Mo4ocX5VQVZRljjAloqoYn5rAFBkmr7MBCw/CeYwx2Mox1GhjvaEo6sy5pc3rGlwdkaQKpKOyOcY7wKB9RRRH6CjCWYetwJSeSOg6gEUhPHilcVhy42k0IkgicuGhGWMiiVd1EO50hGulqDgmkRXEHtGU2LhCpxBpj6xmwFl0VIByOKHxIkKrmEg18E7Q7fSJo4hISmIdoyJf/6AYgROeRAuqzDAoDK7q48oBVAOUN0RSUk51UCahRQxSIKzA5VXdZLr0GFMP6xMJgYoUopSAw5c5VlboVBCJmHLtA6y1Ff1ej6VPPILhlauQcYrxIOrvHPB1DqZNeYT9ps85CIK9x4EHwlFH1QErwE03haA1CILg8ejee+vH5ct3bTkeayFo3Su4zTIBC4QQWzUNlswFrB6BA2dwZUbV71MOZqj6GygmZ4go0BZ8KXCZxVeCyCdEMmX5yBC2yCnTEldV2MpSlSWZyyj9gOnpPkpapFakrTaamMoMKKo+WVmiI4+KNbGQeGHJKTHOoZQCAc5KHBFFoUBECNkCYSkqsF5gRYoREUk8ROkrGmmbUkaoOMZai4obzFiLjFP06FJEqkBXFL6PlwVe58iWqPvJSkNl+5RWgGiioxGiRGFygRlUNLWkGTVQSiC9w1cFeVGQd2folzmm6FP0Z6iyHlhDQwqSdoOmaDI900OXYLOSXjcn7w7wlSCNmzTSNv2pGSwSrxRECiKNSCJ0GhMlGqUscVlCv0exzjFRGpxXCCJa++5HnDbrLMGi7pWMnwtcN8/IJMNArUGwF1EKfvYz+NSn4C/+An7yk11doiAIguCxNj0NH/tYPX/ccbu0KI+5ELTuVWaDFs9sVdymZsL1o0NgwRuwBb7oU3Qn6E1tpOptxHS7RDi00qQ+RUQxHoswFmE9WbeHMwppUgQpSniErHAkOKkhBkuB8SWmMjhf4YRHRglponBUqNnxU6uqIMsHFFVFLGK00nWNqpDklUUqgVApXlTkpsR4h1AxRkQQJ5S5pZEmVEKi4ghXgYkk3aJPM40hSdGtlIqKIjcYV+GdQ6YxSSRJtK/7qw4seenIBgUM+rSUZ6w9QkM36uDaGFxVYosMlxVUg4yZyQ0oXyFtSeTAWYvNSvqdikHVpbumgExSDQy2tLjSY0tPp+hRVWtpNYZwUlE5T2ENRnh0mtAYbtMcajG2NKaRVPioJFIxebdPb80aJtMWutGmsbyBVmJ+eJv5KvX5AXc9oVdrEOx9tIanP72ev/nmustHaFURBEHw+HHhhbBhQ91l5JxzdnVpHlshaN1bODfb7neuZytY53DW4n3dFFh6i1YCfIkbdMgnN9LduI7e9Dgy77A4SRF5TtnP0FFKQ8d0BjO40pF1eyRRChV4L8B7sn5GpzcA54jSBk6UeDHbo9YL7GwFsCXHeofFYipfNyl2JVZ4dKQhUlQewCEVzMx0iNKI9lCDorIUZU7SiIkbKR5HbipEmjCd9YmjGBoRuhExsBlyuEER5ciogZEGoSRSNfHWgGwSaeh115NjiVSM0i2kjej3FIoGSXsEWQq6U5PgLFU+wJmKSDiKXpdy0CV2swF8kdNuxow2mjiZMOj1qDJL0SmJTANhFCYzlP0CW4K3AmcEFknlDNZDkqa0ogjnwExXdPvTpF5g+n0yKdCLFtNceQCikdFf8wBTSROlY+LFS9DSU1qPl6CErOPWsgLhkXH40Q6CvdGTngRxDFNTcNddcPDBu7pEQRAEwWPhv/8b/umf6vmPfrT+W/B4Eu5s9xZiix6svk7MVGeZpU685D24Ep91qaY2MJhYi+1N0iKnkYDK+1Bk+H6BVQW5lxQzPbSM0NZjqgyBpCwKet0BpamQUuKAfr9P7geI2BAnkihOUU5iC0OWVQyKLpYSnUha7Qbt4RGkFuRFTl7kCCRpnCA8RE0B0lC6jMJnFDbHlAWlihgdHUanGh1HGCxaK4g8TlRUrkJHAmKQaYTUMV4oMApTKZzRNKJhYm3AVNhS4m2DxA9D1ED4EbRtIYwjMo5ISAaVpzPTJS8zfFUQS2johKqCfnfATKdPoWf73lYGUUUcvP8h3P+r9UxPzJBGCUPNMTKXUxpHkqZYA94IvPeUVUEuKlACFUfEiaa7YYqoqSCJMSVIkTAUNYjTNuW6dczoiGVaI4dbJFJg/eznLzxCa7wz275GgiDY48UxPPnJdfPgH/84BK1BEASPF+efX9dRnXYaPP/5u7o0j70QtO4t5sbsnG8iyuxYnnXa4Lofa4XPu5jpDRTT6/C9caKqT+QrUkooOwymJ5menGao0aYRN4icweSOXm/AxPpxrIVe1ifLcnSkabbbCCUZlAOitkaKOrNthEBoSdxoMpoqhmgz1R2nN5ih7HQxOIZHh4gaLUoEWb+PqbqMtYYZiiOKqsS5DKUNsRZYLF5Bv5jBRSkN3UAnCp0I0BXWubrfqrR1DauXdcZkF1NVBluALyLyCkQ5gnaAUWgxTJquwOthbBmTmgQxsNhuH+csVdZD5wXeGAadHoNsgEkTIicZi0epiozeRId+t0eEpJWOUqWWZqONHfFUeUVRGJwXRFFKpCKyskLKCCUlzjtAghTgJCavqMqCpNR4nTPYOI0Yn2ZRP2dxYUiMoRCSXqtJ+8D9odVAC4kQFrzAKwFEu/JKDILgUXb88XXQeuON8PKX7+rSBEEQBI+2devg29+u59///
l1bll0lBK17E0H9FYwHpEKIuSjWgzf4ok85tYFsYh0im6YpK7wsKLpT9LrT6DLD9nv43jRCQlXWQ7aUucFkOUsXDdEfDIjjFLm4hdIRXtTNU0eiMayE0tc1nnlZIoxDJYI4TdFpA688xls6/Rn6GycZlBWLly6m0RyiMpZBZwaJJY0jMpNTFDlRrBEanK0wzmGrCkpPlCoS3UBoiafCOIuxJbFQiNmsyN6XeK9QVqNoI0lJSo+qHMpJpI3QtFF2hKKMyboVvW6HqChoRRFaQjE+zczUBFp4hDUoY+jODGgnKa1Gg4aK8QLKyuCtxzhYM76W4aFFLFm+lO50j4mNMxSmQiNASow1COGRTtXZf5VESEllKvp5jygV9fk3OYOipJrsMugO6M902bcsSdOYbE1Ce6yFaC4HqfDUyay8l4i5wVqDINgrnXBCnYjj+9+Hfh+azdC3NQiCYG/2H/9RPx53XJ1N/vEoBK17GTcbo84PazM7tI3LM6qZSQbT45TdceKqR0KBy2eoZjYwmNxAXFYkStMQntgbOpPTGGORIqYdp1hnaUSCWMcIJchMTrfTobQlMo6Jh0awdXdWhJB15a93GONxhUPJiHZ7BAt0ejN0OgOQmuGRYZKkjW85+r0pkJ6iKhnkllQ4tNQYB+BIlK4z46IRMkGKBGsqqtJgjSCbqlBaE8UOpSUShTARPneIXIOViNLjS4fwMUIkVMZQ5BUmK2GQkXU7zPR7KAHWlGhniJTEFBW+MqRC05/o0bc9mlGDRDdZ2koo8jobso4dg7KPUDC8eASpFRvWjVMNKrSOiJoRAoH3YK3D+AKswOFQWuC9oMgt3noSH5F4QTXVZbr6NVqCjgDlyRcPk7ZSGBkBJTBeUDlPrKIFw+AEQbB3OeGE+vGnP4V2u84m/PGP79oyBUEQBI+ef//3+vHFL9615diVQtC61xDgPN7X8/MBi3W4MiOfmWIwNY7pzeCyLmU+hS+7uME0tjeBKjNiIqp+zvSGSSIfoawnjZrkWckg66LjmFaridKazBRUrqSRRsjKUTiDcwYZxURxTBQLpPZ4ZfHCYEVF5Qx4iRQarROKsmByskN/UDA8PESsI4SMsSiQGi8N1imE1XgvcZWjmxvKsgBREcV1cOysxhmP9gnldEUSpchmEx03kaKJKwS2Z3EDR5EZTOFxWYn2BkmJs4BUNHVEVRZ0NozTnZqkmcYkSYypCkpjaMQJi4fHmBmfopjOyfsFVWIYao6gdUxChJaOxrCkXw6w0kLsaI+2sN6R9wqk0FSFQyBRUiGFpKoqsjzHGkeUNCitx1qPFvUUSYkTlsEgo3PXXaAcNhb4ZsSiRkRLHwjDIzgUhvqHWlAPgxMEwd5n331h1Sq45576+SWXhKA1CIJgb5Xn8K1v1fMhaA32AnViH+89Qs6FrB5jSspej35nisHMFAw6iKyH6c9QZZP4wRS2P4PPDKV1lH1D1ilRiyK0TkjiFjMT65mamqY51MI7SZREVK5CoRhqj9D0hr4pGVuxLyKKQIEQDkdF5Usqm1N6gXYGKSPEbNDqkBRVSTYosaZDkkRIFMY6vNV4EspS4IhRSmKtxTuHLRW2SPFlE6Gb4AzSOmKZksiI2LWQeRNZxXgX43OPyBwis4i+JPESmzmqQZ9B3qes6uzEopmijSS2lnyqg9GC0ZEhkjhCq5hERmgrKXoF2kY0owhpFL3JAd4XNJMG6UiKjyytRgpWUtkcpKK9qEWr3aIqLWVWkQ1yAJrtBlI0kB3odjpYV+JljJIJ2jhkVRB5X9caO0O/02Hmvnux7Zh+DHkzYkkSM5w2IYoQUmIIAWsQ7O2e9KRNQSvUY/eNju6q0gRBEASPlh//GAYDWLYMjj12V5dm1wlB615h0yis3tcjdApsnXip7GEHk5jeBKY/Cf0JRH8anfeRVQ5lRtXv0Z/qs35NhwP2O4T99t2fkX1WsvGee5maXM/k5CRKRyRJSlGUlLYiaqa0GikyUjg8DSloNIcg0rMFcFhfIU2OR2CtII0Uqp0Q6yb9rEdWFJjIYIyhqkps16J0ijWWKIppS4VzjpSUNG5glWVs0Sh4T7OZMtwcIVKKvMypKkMqGkRVE19o8swwyPuYsoesBNorYqeQpaHZbuFI6RQdiukORdVDmpiyjIlp0BCSlkqoipxiZkBjeJiRseF6LFkDWiVEDYlzkrJf0Rv0qUoLQ5JkOEVHEb0qw1WeKGoQaVXX5iJJ4ohFSxaz5r41dDs9mqLJ8OgwMhZYX9If5PjS4XF4KZBKIfFIZxHGoJ2n7PSZ+NXd+FYTPTRKPDxGe8ly9KIE6wWVK4llXA/XC5uSSm/jstnypdCkOAj2DH/3d3D//fUQCAC33grPfvYuLVIQBEHwKPj+9+vH5zzn8Z2/IAStewGPwAvwUiERCFeAy8D2KafuJ9twD647ic6mEEUX2x+nP7EBulPEpsD2uuQzfZYsWcySfZbSSJrMdCdYN/4AWmtai1OU0jRHE4qqREYRUaJBCZz3eCFopE1s6YiVpigKHA7jPagEbxwKTRpp0I62rih0jmsYYh1hK4MpK2xZUeUlOlU04ibCQxLFSCWRStDNuixPlzDIejRlk2qipBEn+K5D5pKWbjCYyCg7Fa4wVDM9KCDRCbayVEi6ZQ6LhhkaaSGdrMdqFQ1SoTADg/QVkUlInEaTQOZwyuJiS6E8caPB0OgYpbFkeYmUEuEMrjcg8wX5xAZiF9EaazLIMoYjReUEcZwSNROy/gCRaNqLh/GRAC0pfYWMFEMjQ0RJSrdX0e9XFMbiAa0VzitwMREekUvsxgHurg24oQfo6IRWu82Sp7WIpcIJcGgEEumphzqyHqRACFEPnivrgNXN9n6G+rsG7zcLXB/HvxiDYHd33HHws5/BH/4hfP3r8ItfhKA1CIJgb/S979WPz3nOri3HrhaC1r2EBYSUCOFns+cO8P31uOm7cNP3oIoesuiTz0xQzczgutOYqRmkszRVRGPREoaW70+/7DM5PYHSioqc0bEl6Diqa+VkgcChk4hB1kEojZSaJG0iLUxMTpI26v6fKokw3qOTCG81WsU042Y9zIuryE0fkxc0owSMpcoLpJf16C9IUpeiUURWUWQFxuUI20BuMEQOhuOUtWsmiSOIS8GIGsYXnnxjiRmfRmQWPzGNKkBGKWVe4LTAapjOBmixHGJJszlEVSqcKan6Jd5WSBGhfYQt6ozAuqExuaX0JWl7GBFDYQeUyiOUQhrNcHOURpRy/wP3Mza8iCQaJlNgjEBphYwSWkPDFKYisxUijoibDaqyotPvIhFIJUkbCZ66D++gP6A0dUImRX1eNApZCWzpcPdN0dG/wQvB1NgQ6ZIh2gftRyIS8B4L4KkD19l5AGctQsq6Zn6za2i+Zna7C4Ig2N086UmbgtYgCIJg72JMPbwZbErC93gVgta9iAewFoQDW1BOb8B2xxFlFzuYpuzM0BnfgOsN0KUhVpo0immnKUjNurXrKKzHWMPY2CgHHngAcaQYDAYIpRjfuAGlY9QgJ88LhoZHaLWaSCfobJig
P9WjSgdIHaNaTaw1uKyoa4GVAhcRRQll31Bs7NGf7lAKTSwVg16fSMU0G03yoqKiYNHwKN1+n/6gixMlo8uGia2mKj3VdIbvVVQiQ1SSZKjNxnUT+LyinOlDt8B1MlSlsMohrCVuJSRxSncwYGp8guZoi6QZUVowhSGSERhJVRniKCF3A0xVB42mcgglsMYyyHP6WYYDvLdkZc4+S5ezdHQx68fX02i2KQpDHKcYY4mSiCzPiZIYLwVC1eOyWu8wtgIn0EKBkEgpSJIYISIirerANS8oK0uEREiBQKC8p+r2ye5bS7R0lOl77oWRmMOWDKGHFuFwVFYgvEcpVY8DC+AdznvUNqLREJ8GwZ7nSU+qH0PQGgRBsPe55RbodmF4GI4+eleXZtcKQeveYm6YG+/AVdh+h2xqI0V3Gpt16U9NkE1PUQ76uKzAVpZIxcRpg6IyTE6O068gbbVoRk1arWEaUYS1JTYrSZsNIidQFnozM7TbQ4jCIiNHpzPB1OQ0IyNLiHWCjmO89RS9AShJnKRI4anyDh5F2cspJmYwMz2siiBOEL0CEXuK0pFlObGIMTJmMN2h3+8SNyUjjRaVKym6fdbfdz84EOkQVIqe6TCY7pDoGG8c3joaSYM4UrPNkyHSmpFFY5iuozvTRSUKpcBWBm88UZQQNSKKvsULiNMYgWem06XpHa2RNtNT0xTCooRAKk2e53VzaGfRWpMkCUVRkJUFQyPDOOdw1pLlOc57dKRx3tfNqv1ctmdf//MeBzjniHVENDSEVooeUFGC9ZTOIDyoKMa7egigbHqG/m/uwbZjugfuT/uIEWTkkEics6DkfCcIay1eCPxmzYLZbC4ErkGwZ5kLWm+5ZbZ5f/ghDoIg2GvM5S047jhQj/MsmyFo3Vt4wDm88LiiTzaxnnJmEpd1Mf0ZXNZDVAXSVGitSKIE7RWFdeT9nF5esWjxcrwHYy1aRlR5RZFn5P2cocYQw40WprTkxrN0aIyZmS7rNt5HVRlGh0ZpC41GohH0ugNcp0ecNhiKmnjrqIoSUxjcoCTuGyKraaCRPYMoBAooqgJZGpSS9MYnyXt98kGPNB7GFxXGZTSabe799W9QSHTqWdReQtXN0F6gHERCYZEkaUJsBbnN8HisnQ02PVR5QTXIsVqinETJCC01SdTAVjn9riFJEpSQTIyP44QnbTfpDjKaY8O02i2KOrsS7XYb6yxZnjEyNkLpHJWpKMsSpRRFWQJQVSWtdousn1MWBc45ECCQOO+xtsIbhyk8SdSg0Wgh2y0EkEcFtiipsgJrLVKCjhSLR8fo5RXdNevR7QZrFt3GwSPLiVaOEkmB83N9WCXgsd6iomj+kpkT7nODYM905JGgNUxMwH33wQEH7OoSBUEQBI+UuVY0c19QPp6FoHUvITx4a4GKfGqcmbX3I3ozyLyP63dJvEFricMTxwmNqEnWzZjpDkh0zPL9VqGkJoliep0O0+s2MDQ8DBZMXrH+/rVYa2nEDZaOLCJygsjA+nvuZ/GiJTSGNb3xCeI4Jk4Sep0uriyJvCJODGVeIvIS2y9wWYUqLLGXxDiymT7eWlwjxgiD1gppLBMb1lGVFaUrEaMtZjZO0BprsM/yfVh756/IegOqfs7QWINep0fkBFoKIqGojMN4A5bZcV0FwhrGN2xgw9QGSmHxWCIpSBsxadIiTRK8A6EkXtTBO1KQpCnWWnq9PjqNSaKURqNBOeiTJAntFS1MWZIVGSOjI2Rlhe11mZmeYWR0BOuh2Wzi8AghZms7QWmNVgrh5aag3pvZLw0MzlmUqoNvKSVVpFFKYaoKIRUqjogljCYNev0pNtx2J604ZmxkOctH9oNhjZICnMXL+jpxwqMEuM2i1Pk61+1lGQ6CYLeVpvXNzM9+Bj/5SQhagyAI9iYhaN1E7uoCBL894UEKj6TCZR164+voT27A9Dv4fIAoc0RVIqwhiTRKSrKiYGaQY1XM6LJ92WfVwbSGR0mabeK0Sdpoce+991FVFu8Fk5PTSCRjo2OMDY+y/oH1dKe7ZL0B3nruvvPXuLLAlyU2y/BFQVvHNIREFBVkJcVUl+66cbprxxlsmCKb6JBP9MgmuuRTPapOH5GXtHREU2koKiIvWDo8yuKhEbQXFN0B3XvXkPcGCGPRVqCsp+pn2H7OaGuEJaOLaCQp3jrysqiH1rGGLB9gjUF5QTNKqAYFZT/HV44oSkiStB7nVgiiJKYwFWVVMTwyTBwl9Hp9oigmz0v6vQH9fh8hBM1mEylFHRzjaTRS2u02AojjmEajQRRFaK0ZDAYA9VA2WqEijY40KtJILRFSEsdxHaRWJVVVIgToWJM2UlrDbYbHRmmPDBHHEYNej1YUM4RmcP96yns3sPbnt9G/624ocvD10EfUPXBBqbm5+Rh1u02DQ/VrEOwRjjuufrzppl1bjiAIguCRdcst9WMIWkNN617CI3EgLf2ZjQym1uHyLjbvoqs+kbdUZQHG0Wo0sTaiWxTEjTZLFi9nybJ9ER76GyfrYWccYB3t0UUMqopFS5dxyKFPoDs1Q5KmjG+cwMm6n+TSJUsYHR6hlaQ0Gyl5kdPvd8mygmQ0psoyTL/AO7BZAYXB5SW2sAgqpErQKLyHREZEzYTFY4vwxlP1c5z17LvfPrQXD1OSUZAzNTHOikVLKAY5utTYvERVsP6BDbSSFs56nDF479Fa0Rxq0WikVNbgnCfRuu5LKiOE8UQiIo2bxHEKXmCcQycKFWvKfg5CEqcN+jPT9PsZVIbCW4SSRFFEUeTkeUasNHmRMTQ8yqKxMaAOWp0XdLtdhkaGKYoCIRQe6kRMzqGcBFf3ZxVSEqcRWEFV1UPq6EijhMRLj4oUSiqcdfSnprAzJTqO8FXBsJNUa8fZqH5FetCtHL58KWLpErAGhAYkUknc7FVTj+cbalmDYE/3tKfBpz5V17QGQRAEe4fJSXjggXr+qKN2bVl2ByFo3UP52UQ+QgjAIXyFrQb0pjdS9Wbw5QBX9JHeILzHIUjTJlEyhPERKhomStuMjS1DqJhBp4dIGzRbLYQXKC1Ye/99jI0uJlKKQVmwbOVKpicm6ZclKw89nO7a9SAlXgoOOPhgBt0ZsjInLwtKU4/Vai1YY4hlTCNNiUYjCpVjBgWiEmgVkzYiyqIgbaSk7SZKCkpbN43VSpIkCXk2YCafYmowBdKy/wErWX/fWnrTfaq4YKjRYl1l6U5MI7WuM+xGGo1A+ToZkaks1ltazSZ5WSCFougXdMQMtrKoSJG0U4RW6Dii2Wpi8gJjHNFskqVup8fwkjGUVLSG2yTNhM70TN3PNNJYayjLklackiQJWZ5T5CUbJybqIDeO0UojlUTMfo5qtnmwEAKDQnlFZas6MZN39Tpa4oTHGFM3MZaerMjxwJp77kFHMSPtFvkDG6lKQ3LHnSw/9GDGFo3icWArkHGdyRmxsHbVh0rVINiTzdW0fvObcPXV9ditMrSjCoIg2GN1u/D2t9fzBxxQZw9+vAtB6x6sTsgjkdKBy8k23EsxW8tadKZIlaWZJnQ7UzgLw4s
XIXWTQeFRSUJrZAmImKmZLkVesGy/lQjvMWWJEp5F3qGlwFUlOtLYSLN+epp4uM3/3Ppz+t0B+67Yl0baQqQRdiBojg4TNRsMBgOclEQ6Ju9lFPkA5QRJlBA1U+IoAQMY0FFC6S1WCqI0Yv34enwlyMuClfvtD1qwbuN67nngbkTi2X/VfnggTRJmqhnWrVlLg5Rm3KQ33UcoSbPVIE5itJZ478myAWo2k/DisUX0BwM2ToxTOUueZfQHGYuXL8VkGU55WkMtvHHoKKLVaFEVFcPDoxRTk0QqItIRSkhMWaGUqrMGVwV4Qb/fR6oIay1pmmKNY3hkmOnpafZduR95VqC1Jopj4ihCIinzklIUWOsoi4rKlkgtsdaS9zJarRZKR+RFRkxCmjZYtHiMXq+HKD3SOmy3j/LQtRvZ8Ms7uGWkxXMOOxiaDSgLSDUehfUOhUSIumn5NjMyhSg2CPYYRx8NcQxlCS9+MZxyCnzuc9Bq7eqSBUEQBA/H3/89fOIT9fzjfaibOSFo3UMJIZBS1jWt3oEvMNk0ouxSdCeIFQw3m1SDGZwT6KhBf1ChU4+ImijdoLQC5y0qSWknKSUC5xxpq0WZDxBpSmUrRoYXIz2sX78BF0f1+ktAp010u0mFZLzTRSfRbF/LmH5vQNbLKAYFpXeoSOGcwOuIJGmgvMTmBpNXoCO0akIE01kP3UwxmWWo1aISHl8VOCXxWjG2ZISo0UQqTXtkFLvEk0/luI7BVYY0bSAjjXMW6+xsbaUnbqVoG9GZmqLf65FnGdJBoiMq78FJ8PXwPC5yIKHZbiIc9Kb6xCpCSIUQkrKsEGWFriIkCoRDa4kUESAwlaUoCpRWZHlOmqYUzqAiTVVVjIyOUJYVtqro9ft4B1rWY6nGzZTcZ0ivcZVBSJBK0u100JFGqDrYtThkJOuxX+uUzygH2guaXtC9fw2tfZZR3nkn8dFHQawAh/Uei0Bt9qO/eXzqN1sQ4tYg2DMkCVx6KVx1FVxzDXzlK/DmN8Mll+zqkgVBEAQPxy9/uWn+wgt3XTl2J6EB0R7Cz4/ruWlSStWBqze4qfXoYhpl+kRUjLYbWFMx0+kRxS3S1hh5pSidRqfDRI1hKi8wHtJmi+bwCD6KGRhLJSSl0sTDQ0TtNo2xRbg4giRiZNlSoqEWqt0gGRkiGmrTXrwI2UrRw21opNg4QqQput1EtRr4NMYlGtGMEa0E0UqhmeBbEaYhySJHTxqKyJGOtnCxRA6lHPjUo1l0wD70qhyrBauecAjN0RFkosnKgqKqQNf9YY0xCCRVUc7WrGYU1lDhcUowNDbK4mVLGBoZRmmFlIpYx0Qyxlno93ImJmfo9jOE1nVTX++IkwSpJEopQLB06TKMsVRFSVGUGGOAOrCM4ohoNjAt54e5qUiShGazSavVqvu0KoXDE8Uxxlr6gwFFWVJUVZ2cKY4QkaK0FV542u0maRojBJRlxmDQw80GrXEjJk6j2aDZoyQkQpBvHKdz37385uaf4mdm6gEcZ1MwyblaVtiUkWmz2la/xWMQBLu/s86Cr32tnqD+hj70cQ2CINgzrVtXP155JfzO7+zasuwuQk3rHsJaO9t/lfmAdW6eqqQz/gBRMUPR2chQQ7Oo2WbjfZNUlSNqt5G6gfIenY4QN0dxQoHN0VoRzw7pInRE5aCfF0RRXaPXaDZxCGb6fSoEixaPoTy0hkZwlSOKIoSXDEyJTpsYa6ico5IC3WrRShp1TSqSRMXEOgYnUEIRCY0U9Xy/26Mo+1hXMDTSRskEKxwkmqF9lqDKjMrlLFu6giSNKXtdJmY2MigKnPMIpWk0FJ1Ol0ariXGWWAqsBONK0AIvBKU1uKLCVRacByGIowTvHZV15GXJSDJK0kjpTkwjKkGj2STrZBRFxX4H7E8vz3EOTGUQClRdwQre1WOuOocxBlXUiZS897RaLYwxZEVOv9+nLEtGhuoA2uMxzlLmBQBDIyPEaUmR5+SDHF9WRCqi0UjJqxIRKZx3iKgeMsfjKa3F2wq8IEbQspZi/QbW/M/tHHrsk9EjbYgiIgROqE0JmLawrazCQRDsOU4+Gf70T2H1avjgB+GKK3Z1iYIgCIKdNRe0rlixa8uxOwlB6x5ECDFfywqbal9dkePyDoOpdZj+FKOjw8SRRqsYHbconSaSDZpjdQ2riFs4a9GxoNFIEDj6vR5SRERRgnOWJE6xpkDHiqmZafqDAe12i9bQMGVe4EqDiCNUnCK9pDU8glAS5euhXRpeoIXClgapY5IoAutxxteZch0U3iEQSO/JtaXw4L0kszntJOKB8XW0Wm18oiktVIBsN4gXj6G1Ys296xjkOd5alPV4B1EUkSQJqWgSJwmVsPSLnMnuNMJ4sjxHVBbhwFqHx5E0m0RakWMYFDmT01MY08J4Ryw1SZqQd3KSOEEIhVYRHoGzFmcESFDS45zDudncvN6T5zlJu0lZlgwPjdHv94G69nWQZ2itKa1BziZ+youCQZ4RN1IclvZoGx9H2Kyk3WgRaY3pGZzwOCxCCoSUiEQhSoWzBmscOMeQVvQ6A7KNk6z7zV2sfOITQMeoOMJ6v92AdC5oDQFrEOy55oLWufH9giAIgj2H97B+fT0fgtZNQtC6hxBCLKhpncscbK2lzAdUgx79ifWMtBJarZTO5BTGOBya0imajRHi1igqbmGRGAQq0chIUWY9iiJHCUe70cKaColikBWURUF3pkccpwwPjQJQliWTMx0SnYCQaBkxPDZKXhQUeY7wkjRJSJOUMssxvS5VNmCoNcTQcBMpJEVe0u/1KfO8Dva0RSUxQsV0JibROKxytJoxVVVhY0WaDnPf+gc4pBljvEXEmubwMFJaiiJHVHV2XWNs3W820pTeIZQkNwbt6ma8tqyQ1F8A4D1xFOOThMrkICuKoqCLZ6jVRBtF1a1rQNM0Zd269QhZ13xb4xDKE8cRzH8+ljiuf6ycc8Q6IssLjDE4PCqK6iFqBp5Op4MxhjROiZOEhrH1kEFFhrOOVhSTjiSohmO0NYypDJPTU2SlIdYtnLMIJfBSIGKNMKZ+H5MTqRidVzAYsPbee9l3uoNoDQEO6wyRnK1t3bxqVTKbXTgIgj3Z4YfXj7/6FVgLsw1zgiAIgj1AtwtZVs8vX75ry7I7CUHrbmDr5K1bhw1zzYE3r22dC1rzbEA+6KB9xdBwG+lK8k6HwSDDWEkaNZFRk8prrJcY63FeEEtBURYUeYFCohCkSUJuLd1Op85q2x8ggNHhEaqqwhSeOGnQbrdJo5Q4jpme6syOryAw1uNxaFvXphrnyUuDKSuQWT0GqlJUVUVFhWzo2QA3wTpLP+9zwKKD8ZWjM9PHasnyffZjzJas27gGk4PxDq8ki5cuoTWSko/3mConIDP0O12KoiButkEKQBI3U3SkaUYRoqiY6fbxpcN7SaQ0kY7wUUSiIRpt01is8c4ircYUFXmW118OlIbp6RkaQy1KY8GDVDExde2yBKxVaCHpdjOiOMa5eiibXq9XD8GjNWVZEukI6yyxTtA6xuFJ0qReJ40xxl
L0BlSFIbIgjSOOIoaHh2kIT7cqMNbivK37p2qN0IpSFJgsp6UViZSUnS5r77mXdXf9hn0OWAnOE8totjnz1tfi3BiuobY1CPZcq1bV2YSLAu69Fw46aFeXKAiCINhRc02Dh4ag2dy1ZdmdhKB1F9s66c1s003vNltL4hdEGA6BAyzYHGsGeAxDo0MI75iZ6mCMx/t6HNQoismrEqhr1xwCpSTeS8osp8wL4jRBqQQnBbmtmJyaYZ8Vy6jKkiRukqRt4iSm1+2A81TGIZUlUhoiRXvRGFPjU0SNFs1Gg0hqKmMYZAVCaRYvHSPWEVVZklUVZWmQUtFqt2m0WkSRpqxyEg0gyIqcfQ9aRVVZelWBiiSLli1FRgIZJVSFR8VNoiihM95hYzaJ7zuSoSY6jWk0GpTSQuVRCLx1kEiW7rsM7y39Tr9uHiwhqwrKIiezFREakbQQ0jMxMY4ygpHGMI1GE61ShhaP8pv778UKh/CCOFEoFJHUKBlhhcMJiXV9mlGMtQ6lFL1en+HRESKlmJqcptVqkzZbaK2pCkOe58Q6otFo0s8zwKOkIkk0orKMT0+SRAnN4SHSRkp3YiPeObzzSKWQymNVHdBXxhChAUWn02ewZi0P/PpX7PO7z0IqgcSCl4DHi01jtnrmMrPNNR8OYWsQ7ImUgkMPhdtugzvuCEFrEATBnuKb36wzwUNoGrylELTuBjxgqUMEBbMBq5/N+Fq/YG0dUkgliJMIyhyEoz95P0VvI0Z4uh7itEljJCbvWdotxVB7hLQ9jIgbWKWwsg6q8B5XVgjrkRbyfs7Q8iF6tsQmmuEVS+ibEpmmaAm9vGQsSfBe0i9yDBJnHc1I0xgbZSbLqKRg6ZJlTE9NM9mdpBHFlJUFFNZSj8XqQMUpo8Mj6CjCWMNMNsD0LM4YynxAo9FgeGwMlUSoNGZQ5HT6fYpBn2xQsWJxQtRImOkZOllJ1xfse+RBZOMdzHSPWMesXbsGlGTJ0qU8sG6c5fsuY/GiUYoqQ4xEdPoDKmOISWmLmESnaCGpSsNgakBpCzrTHYaabUw1zb777odz0O/1aY006Q/6eOkRXjDoZGTdjERHqDjGYomimG63jxCCytp6TF0rGBsbo2vADQriKEU6S2dqil6vRxzHNNImxlqUjlFIrHVEOiJqtcjLEqqSREmWLVpMVuYY5yiKgqosabeatJIGE24jrnJUpkSlGtfv07/3XvJf/Iz0yccgvAQh8KL+8febxaZqs16tIWQNgj3XYYfVQesvf1knZwqCIAh2f5v/vg5B60IhaN1NzIUKav7ZQkJINk+fI4QDb9AUaGFIRtvILMcWFdNTM0xNzdDSKTpKiKMIw+y4pSpCSPDOg/AorYmabbwUyDhCeomSAqkkGIXydf2sQoDUCBWRNhS9vCBKI8zskDLOOlSS4qRERzFJo4FEoqKIFEmUJLRnR7q31hI1UnQSk2cZA2tBCJIoRilFFGssnqyaHTbGWdCKqNFE65i88thBjpeK1miTXq+HKw22IchnLO1UoRNNb6ZHX8/QSlLajSZTU1NY7YiGEhqLWwyrGF9AK2oRm5i8X/ddlUIyumSUdnOI6elprKvoZj3SpEFuS0YWj+ClY9Dr4ypDqzWClHXWYOkkrdEhKjtFUfQAiJQiUgpnDN2ZGdppgzRNUQ46nRl60x2iKKKdNvGCOrOxKfGz4/AaIUArJBFiNmkTgKL+8sHODl3krcM6C8LX48AKAZWhIRW236M/vpHUGhAGVAwe3NyYrH5T0/S6Fj90gguCPdlcv9Y77ti15QiCIAh2zFw/1jmhP+tCIWjdTWxvCJL518VchpzZzohibtxNh/AOnEELjxIevEVrVQdGSjLIBvgoIh4awkcRDklRGKw1CA9pnBCnMSRxva1UeKURpkLaOlSWDqanO2RZxtBQ3ax1dHQRcRyjoghrPaYwSCVJ0gStFLascDZBNyRaaWQU1YmS8HVIJAQWsN7ihSBOE9I0qbMLe0dZGSwOBChd9wfN3QAnIGqmaKWQgNMglWJs2WLMdI8oiRkeHsYYx6DIieIYpTS2zMhtiUzqYX6KrCTvF1glaOk2XniEEiAFURQxMjLCxo0bccYyyAYsWryEpJGS9TOGR0bAeay1FEUxn9F5ZNEYkdI00xRbVVhrAeYzCxdFwXB7qO6P7B0Oj46juv8rnjzL0TohjhP87HbVbIKlPM8Z5BkOT7NZB7heAFIgtarnhSCKI6qswkmFtZZ2o0HW67PugbUs7vWhHdXtgENVahDstQ47rH4MQWsQBMGeYWpq4fNQ07qQ3NUFCGrbjx8keFkHtWKztTyQZ+AcSnhMkWOKArwlTSOGhlrEqaaoSrIix0lJ1B4ibg4RJQ2EUJTWUViLQ4COEEqipSLWmiiK0FGEjDRSKVASISU6ihgaGiJppDSbTdxsuay1xGmKUIooTWi2W+gkRqUxzaEhkmaDyhkKU1I5S2kqqqoCCVGakjRSdJKQNJp1/9ooqpM7SVE/alVPkUImEXG7CZGCWJMMtUjaLZqjQ7THRuj0e+RlRrOVYh30Bn1mel1KY/GuDvRGxxYhlUIoSWkrjLfoNEInEVJKqqoijmOG2u2632llABgaGsI5R5qm80mVBoMB/X6fwWCAc44syxBC0Gw2ieMYay1VVSGEIEkSlFbkeU6e5+jZYLwoCnq9HlVVUZYl3V6PwWBAnueUZV3jrJSaT8KV53mdVXl2/F6tNVJKlFa0hoYQWqEiidaSkVYTWxRMbRjHT07N9o92Cy6l2QbpQbBH+9jHPsaBBx5ImqYcf/zx/OhHP9qh7a644gqEEJxyyimPbgEfQ0ccUT/+/OebepoEQRAEu6/JyYXPQ9C6UAhad2ebB6lC1Fli5xc4sqwHtkLgiQVIW1Flfaoix7mKoswpTEHaatIeHkbM/vNe4JAgNTJKEY0UlSY4Xyd3EsLXyYBV3feRuWzFSiIjDWmzDnx1DFLhhazHaI00Xqo6AE5SfBQh4xiRNpBJClFM3GzRHB5BJSlGCESU0B4eodlqI1QdpAql0UlM1EiJG01UkkAkQUmaw0OIJKIUll6VIRoxS1fuQ2OsTe4rVKLp530GeYZQksKU5GVBp9snLwt6+QBrLWmryciiMdojw6hGghGe3FYY70AphJQ0m02WLV9Oezb50fT0NOMbx2k0Giil6uDbGrTWJEmCEILBYECWZfM1rFAHm0mS0Gg0iON4tj8pWO/qMg36dPs9SlMhtQIBxhjKspwPTOfH5HWuHud1MMAYMx+0RlGdwTiOY0ZGhhkabpE0GzSaCcNDrXos3plpxtevA7mNpr8PVdUfBLu5z3/+85x//vlceOGF3HzzzRxzzDGcfPLJbNiw4UG3u/vuu3nzm9/MCSec8BiV9LHxlKfUGYTXr6+HvgmCIAh2b1vWtM7WWQSzQtC6W9o6eti0pG7+i/CUeYZ3Bo0FU2CyHsWgS1VmeOFBiTqYbDVxcUTW7zHo98myEutBJSlpu0XaGoJGA+fcptxPc+OOiroWzniHE9RBlQQVaYxxSCkRKKIoweFx3s/W2
tVD8ngpQQvQEhlHRI2UqJGClhTWUDqD8Y7CGPKywuFB1evGaUKcJkRJgooSxGzNqkoUcauBkYJKAKnCxxqZxuRlydjiRex/0CpGx8awziKlJEkSFi1aQpo0SRotrHWkrbq2ViiBVx4rPGK2b6qxdr7mVErJ2KJFNJtNirKYrzEdHR1leGgYKeX8e2it8d5TliVZllFV1fy6jUYD7z0zMzN470mSBCnrH8EkrcdqrWtVPWla72uuFlUpNf8+c8HqXCALzK8rEKTNJsOLRmi0EprNlDSucwnbPGPd/ffV14+by0AdqmCCvcOHPvQhXvOa13DWWWdx5JFHcumll9JsNvn0pz+93W2stfzpn/4pF110EQcffPBjWNpHX5rC8cfX89/73q4tSxAEQfDQtgxa536HB7UQtO5GtlnRNVsDtim0cOA8eEeVZ0hnUN6Sd2fIez2KQZ+yLNBa0xoeIh1qI6MY6zx5UZFnBUVZ4b0gimJ0kuIjTd1PVqBF3UR4rtmqVAqvFEjF2OKlLFqyDJIGUZzSzzL6g5zJ6WlmOl3KwuDwGGsorcEIj5Wzw7AIj9MSqwQVDivAS4HzHmNtHeQqxaZLUtQ1glojYo1KYnQzRcYaoyWqkSCbmkpY+qYkcxVRM8FKR2u4xfDoMFGaoJQibbaJ0waNVouRsVGG2nXz2biREqd1QiiVxMTNlLiZ4rxnMBgwNTPDIK/HaFVKMTo2RpKmZHmOc46hoSFarRbl7PGqSM8nZIJNgb8QYj7o9N5TVhWlMXghSBoNRhctYnTRIpI0pbKWQZFT2orSljgcSTOhNdxiZNEIS1csZWh0CBUrVKyQkURogROOoirIqvoLi/Zwuw7uY43wjhgPZcUDd98Dld3yKlvIbzYFwR6gLEtuuukmnve8580vk1LyvOc9jx/84Afb3e6d73wny5Yt49WvfvVjUczH3Fzl8fe/v2vLEQRBEDy0uaD18MPhy1+G//W/dm15djchEdNuYlPmVr/FUrn5CrOjkdQ1ZM4YJK4exbUq8aagKnKyrESpmNFmSpK2EXGE8RIhFdbXzX2FBLTESShdhSwdSkqk2DTYiQCcdnjrQXksHqUkWIdDkJclURSBl0gdU1mHlhLrHdY6DB7rPZWrgz4d63qfztXJhhrp7Mg+9X6Fg6o/2NTj0js8sh6HVAqkhCiKmJmexNsKgyduplRFTmErlIW02aA70WXj2gmErYPFdrtNWVZMTExSCUNmC3QaoWJQcYShoJ/1EV4wpFroWJEVJUmS0Gw0qIqCTqdDmjQwlcE7T38wwDtHZSqgrulECDqdDlIrmu0WaZrONxWe678qZ4PfwWDAYDAgSVPSZmO+djZtNohURJo2yPMcgEajgdb/P3tvH2xbftZ1fp7fy3rZe59z7r3dtzux82ZCAgkQgolEA0RGEQocBERgEAVTyh9qqJRdMiVCYTFQE6mZYBBR1IJixFJ0HBxALQZpBAStcURBwZBAXghJv/d9OWfvvdb6vT3zx2+dc2+nOyEh3X3uvVmfrl1773X2y2/tve7p813P83y/jpRSbUue5239bGx1OuscYpjXFFj3a2QntYpcElYLkhLHjz+OxoC0ZY5WMpSb284XobpwG/L444+Tc+beD7FavPfee/n1X//1p33OL/zCL/ADP/AD/PIv//JH/T7TNDFN09n94+Nj4IbR2jNB7XjRZ+T1PudzAAw///NKKbfeP+5ncl9vdZZ9vTNZ9vXO5Lz2tc60Gj7jM5Qv+zJF9dn3JDjv7/Vjed9FtN5CnArXpyqHp9Zgt48/Rt868v4YSYFLR4c8ce2DxHHkwoVLXLr7XrrVBuN78B1eHahlCpkiBtd2WN/MjrPVjHi/3XO47sC1xGFgmNtju25F41aUXDjZD8TxGBFhc3iEs5YUC1MM9OtV/eMt17lLpZosuabFe0+cJrQobdthROr8KzDtdkzDhCmKrXk8CELMhSkOdY1WgOq+6wyEktkcbBimAVS5dPluxkce59577+G9D/4a2+3I7tqOxnVcv3KNT/m0V7FLO4Zh4GQ8pjtacXThAsbVSmW3avHW1wrnpGwONpRS2A17jHNcuXqVFDPO1dlR4yy7k+oi/Fh4gsv33sP73vNeLl++zBNXr9S52a7DGkPMkWvXruG8Z73Z1H1ofG0FToGDw0PKXGFtVz2iME77alSFkFJks1kRo7Db7ei6jrvuej7DMPDEE08gIhhjuHDhsLY0i3Ll2hOsDlccjzuOrz+BGOFC32O6jv173sP60z9zGWFd+ITl5OSEP/Nn/gz/4B/8A+6+++6P+nlvfetb+fZv//anbH/sscfOTjJ9vJRSzkYITscHfre8/OUC3Mt73yu84x2Pctddt5ZwfSb39VZn2dc7k2Vf70zOa18/8IENsKHrBh599Pg5ec/z/l5PTk4+6scuovUW4OmrrDc4rTxWfVnmEmjClIRqgpKQXNuGVZmNfoSMUlDsqRAWmQ2dZK5cKhlQM5/pkPkxpSDUeS/r3ZlodL5FUkRNqk7CTUvjPMoEMZxVG1R1rv5Vx+GCkrUg3mGokTIxZyTX6i7USqVFsFrfV6wBCjEUimRknuVcrVZMw44wTbSr6qabpxHjPIhw/doViiq+saw3a0y27KY9+92ebApGLMZYDAZjZoOkVSKNBVFBS0SEs3+4NbG0fkOnVU3vPeM4nrn4Nk3Dbrc7+76Ojo7IOZ/Nncrp/s1zpxg5M126fM89pJSYSqluzcbQOEccAzFcJ4SJlOKZydLp53vqKnw683o6e9s0DcYJrvf4zuOyIKVgiiIhgLOM145ZF+rpO/mQUySndxZFu3Abcffdd2Ot5ZFHHnnS9kceeYTnPY394rvf/W7e97738SVf8iVn207P9jrneOc738nLXvaypzzvm7/5m7n//vvP7h8fH/PCF76Qy5cvc3h4+IzsSykFEeHy5csf9x8Q99wDL3yh8tu/LVy9evnMUfhW4Znc11udZV/vTJZ9vTM5r30Nof7xdd99Pffc0z0n73ne32vXffT7uYjWc2ZOXf3w1ksfknJzKnC1ZCgZckZKRnPE6Cy2jCHNz7HU9ltRpcza1UgVYtW4J892POXsfXOcEAMxJ5p1X59nLLQdNkQkFIy1uKZDnIOY0SKEkDAqCAZrTBW7QC6QY8ZagzMGFUPRgiiYOp5bBSOQciGlSOM6vPN4zRjXIM4wDIW+aRlOjgnDSHYej1TtVRSrhakk2tZz8vgx43GgBBAx7E+29BfWmDlCyIjFGoNtHaFLaJ4oqcy6vi7ce08ppYpoZ0ha0BTZuIP62faW69eu0XdrYoysNusaJ1OUFCJZS20bNvKkmJq270gp0fU9h0dHPP7440xhIuWExRKjoijGWdKQuHr9GtY71us1KhBj5Hh7cuZQ7IwwjtNZJE4VxIZMqseDZkQVUzI2K2HYz63BTz62nrJhEa4LtwlN0/Da176WBx544Cy2ppTCAw88wJvf/OanPP5TPuVT+G//7b89adu3fuu3cnJywvd8z/fwwhe+8Gnfp21b2rZ9yvZTk7RnitPuiWfiNT/5
k+G3fxt+4zfM3C58a/FM7uutzrKvdybLvt6ZnMe+XrtWry9dqsWl54rz/F4/lvdcROstxYe2BhtQKDdXw7Q+RjUjJaE5oDkiqjjnaboO27TgPOpsvRiD5rPCWq1o1qIrGSgkRBWR+i4xBvxc1fPeE2NBbRXDiFBEquBzDoypYrMUTMwwH/TiDCpC0UIuhVJydb11jtY3iBiMqYIzp5pjSsqUmMglszGW3vW0TYNvHThHSYGcElIyUhRNGWcdzlkkF4b9wLDf0Ta+CnMr9F3PwcERJWeOrx2zLyPJJ9Cekgtia8ZsCOHm6FKAswqmiOCdR0s1jXLO0fc9KSWuX7+OtZamaej7npwz3nmstWQtWGtJpYpLKaVGBtn5swNOtlvGMDGFwDRXThvn6XxD13VnFdWrV6+S5/c+jcJRVbquo21bdrtdXasRUo6oEUIIqGZKCjjr8Aq985QYzw61pxWrsAjWhduO+++/n6//+q/nda97HZ/1WZ/F29/+dna7HW9605sA+Lqv+zruu+8+3vrWt9J1HZ/2aZ/2pOdfuHAB4Cnbb3c++ZPhp38a3vWu817JwsLCwsJH4tSI6eLF813HrcoiWm8JPrxVq36IelBAtJroaE5ojpQU8cbSth04T7NaY9sO07QY32KkIWuh2icJ1tSLGsVSSCUBihVQreLSzFU7MaZ6QVlDTpFUdG4xtlUEK6jeaGw+c8p19ky0akqUXMhaICesqaZLIIgqMUbiNFFSRlOuIjglihasq628OSZKqOJ61Xa4WHAIkjIeYRpHnnj4YdLxMevuAo33jGVCtbDp11zfb9nt9uzKgPSGHDNhCoizjOPIMOzx1mOcA1FSKbTWAIpRg28aUsqUXOYsWUPYB9brda1YGyGnajhlkLlNt36GZRpn8ylH168Y4lQNrER48MEP0vU91jvcXKXtVx2brmccDCF0hGDx3iGixBgYhh2r1RqArmvpugbvb/xTVgE1Si4Jaw2lJIwYpChOYdxugbkdnKWounBn8NVf/dU89thjfNu3fRsPP/wwr3nNa/jJn/zJM3Om97///Z8Q1YEP5RWvqNfvfOf5rmNhYWFh4SNTjZgW0frhWETrrchHUBByqm/zLFpTIudEQ505pQHX9ZimAefrBQeS5uzUebTVAGYWwBSKFpwRyBmdjZQwBpCzWcz9MBJLRqzDOosKc4XW4H2LMwZvZ7dfa0GEpFJnauc5USOn86FVAFtldkEWxFpELMYaRCwUwdqGnCInJ9fZ77ZcODxkvd4QMaRxoIwjSQv742OuPPooF8XTtR1HB4cM10bymKAoXiyrtmcYhiqgU3X0bboacVO0zGs8bVNOZzmoNZe1wZhaEY4pnVVZ77rrLoZhYhhHht2eixcvcvXaNbqu4+DoEO89WQv7cSTECNNIKpmmqSZYxycnrA429F1DWwpaFD+LYO896/Uaa0emaWIcR0RkrrLWsnAIE9YaVOusa0oJ1LA5PGSYBryzaKmV+RImcghcv3IVLR9+hnph4XblzW9+89O2AwP87M/+7Ed87g/90A898wu6BfjkT67XS6V1YWFh4dZmqbR+ZBbRegvxUVW75vlU1QKloKVAzpSiVfA5i7EOnc2YjAhZa/SMai2aCqfKtb6WzFOtBkFzOYtfsc5SpLYdIzCOUzX7aRucaxDjqiD0DesV2FKwBmYnI4BaVTW+Vm5ngyeDQKrVYoPW+VeVWtBtquFQplZ5petgF9nvdsRpQhTsHIMDQoyJKUyUEDCqNM5VN+ELF7DJkvawXq0IKZFV2azWBJMgF2II9Js169WKHDJWTI2UyVWUllJbm+1cObVWEaSKXd+c5beKCKUUUk4Mw0AIAeccqlqrq11HO0dlpFRNrHIpuMbTdV09B5Gr2RQCUwzzZyO4xiMpMhyPTDGw2RxgvSPOonoME3keVBaEFDLOWMQJGgvWenIqYAua6gz0sN0+OwfwwsLCLcdppfU3fxNyBmvPdz0LCwsLC0/PIlo/MotovWUQqiSqAvLJP6lbzOnmUqhDqhFKAI3kHKt4metnWhTN1XnXIFXcnr6uqQ62zPmctXqrs3FwBqOkGDG+zrU2raUAMUda29G0Dd41iBpEqviyziIxztmq1ZG4GhlXMSq2uv+KgrMWK1LdjnPBhomkkVIUbwTfNGgKYAzS90io7bXiHDlGphJhCnWutRRKjDRiuHh0hJ+UK1eeQKIhTBmLZT9u2e1OiGQO7towSagtzUlZ+ZbOdUiGFPNcrcxoKpSslKQYazDWgVFMSgzjwNHREXFaE2MChb7rkKIMuz1d15Fy5MqVK+x2J0BtrTbGYL0B64gxYE3LxQtHTNPEMI1Y59Cc6dsWYwwpJpAqmJu2xdrqiGxHizHmzNzJGMPm4KBWsq2l2/TEmIghUWxLomDEYKyhdY6gsx/1TV5fTz5hsjQLLyzcKbzoRdC2ME3w/OfDL/4ivPzl572qhYWFhYVT3v9++Pqvh1MD/EuXznc9tyqfeAM+txhVRgrlKXJVqTZJdarVFJBcZS0UyrQnT1tOrj7EyfVHOBmOCSXh+5aSMnG3Q/cDZT+Qdns0hCrwUEzbUKwhTZEUlRJrRquxQswTuUSMA+cNKU2M044YJ9brDmNBRDHeIFbACUUKu3FPIDOVRCBB68AboiaKUTKZVBJqFDVKkUwikjVQTAZbcI0h50AIe8QoxoKGERXoDw9oVisChSlGUin4rkNE6JqGkhLOGMYwcTxukc7iDjwDA9t4wpBOmMIWQ8FjMEFZm5YmG1yGRiwH/ZrD1QGtaZHsCEOiaTpW6wOyKmINYg0x1VbrbtWR4kTY7zno1lxYbVj5FqFgjNB4xzSNxDDRekfjLE6ENE2EYYBU27sbEZwqGgLkTOM8bdvRtl2taEuN5zHGImLYbA4pBax1bDYH5FxQhZQyq/UGK5ZpHzg6uIhpOkzTkZwQKGy6DqOKDicotdLtqLOucjaYvPxaWFi4U7AWPvuz6+3HHoO/8TfOdz0LCwsLC0/mG78Rbp5gWSqtT8/y1+ktgN50qTJCqQmqudZf56zPM0WrGU0TmkdyPCHnPa5zSFvjVUqKlCmQh4G03aLjhBNovMU4A7bmpxpjsdZjceSUq5kQmSIF6x3eWzBaq69S6LoWY07jcTKFjIpSKMQcKKJkA4lSzYBEiSWTNJHKzZdIKomcI6kEMGAbi28dxkKIA6kElEyZBmKacK2nWXVMKTGmhFhbTZ5UGcaBaRoRUa4dX8d2juIL3WGD2ziCGTm8a8OFuw7Y77ZMuwFJBV8MYTuxu3bMcLIljiOaCmRDiYVhO5FCRqkOyd57fNOAoX5Gs7twDBOm5NrynAveObx33H33JY4OD9lsVsQwMY17UghIyTWKKEZKCHhr8WKwqkgppBCYxkBK1RQrpYxzHuc8Md5oXRYxtG1HKUoIkWEYMWLIuZBDoW9XNS6oacjWkAFvLFIKJU41/1YUW2oVv1ZcT8uvS7V1YeFO4V/+S/jf//d6+0d+BK5fP9/1LCwsLCxU/p//B378x5+8bbM5n7Xc6iyi9ZbiVCjc3B4812L
P8kMVcqrVV1G6rmG16lhvVmCEEKZafWw7LEIKEyVHrDH4xuMaV+dhc55zWkGMzLOVp8qltpJaa3G+zpg2jZ8znGrmaE6JFAM5J0RqPIx1jqZtQWorcI26KaSYalIP1f02xoBSBWcuNfO1aRqss/jGUkomhkDKkZwiIUyz+64npkjRjPWOMAvVq1euME0Tq1UPBg4vHWKcIA7aVYNKZnOw4sLFC3WOVxRrhZwS0zSSUxXQu/2WYT9gVbBqKDEx7gb2u33d35SJKWKsqeuYHYGtd+yHPfv9npQSIQT2+31tB7aWtmsZx4kYIzlnGu/p2raaXek846t1pjjHxPWr17h29SrTNJ1lsXrvz+Zkp2lC9fS4kLoGa2u8TkqEKZ6ZTc0HCsbN5lm5VJfmm4+yJ3WjP7VZeGFh4fam7+H+++FVr4L9Hv6P/+O8V7SwsLCwAPBjP1avv/iLa4X1D/7BpW7w4VhE6+3E6UGc89kB3fc9q9UKMdVVdhyraG1XPX6ejUQg50QpCecEkVodHIY9+2FLLomu78mqlJsEjMyiyzpbRY+tGaxKdamNMZJjQhCaWVS5pgGFGKpw0pzrbKaCEYMWrfmoSjU6irnqKmNBbwjzUurzUqzZrJqriZCzBmctp1m1dhaQKSf6zZrVpsc6w+ZwQyHjG8dqsyJpwreew6MDVpseMcKUJmIOiAXrDUUzIVQRa5A6M5sLmmIVlCkRpoC1lt1uzxQDTdex2mwYw0RIkbZvmaYqULfbLScnJ4QY6Vd9NZiaTxZ476uJ0uxafCo+ERjGG07Bp2LVzbmudo4iijESYySlhPeetm3puo5SCuM4knM+E8Wnz/Pez4ZRec5ofRr34MVQeGHhjkQETo2Vv+M7bkQrLCwsLCycH7/6q/X6T/0peM974Od+7nzXcyuziNbbiVmoasqIGIoWfNvi2xZUEAylKPG0fdQITVurpEqueZ2muganHJjCwDgNFE34xlVhmzM51yppLrUaWlRnoyaZq7PVLTfGSE616mutRRDEzNmkuca3lMLZbWcdztbcVdHqVJxSIqdMybUVNoZaFbZGSDFUQ6jqEMW439F6j7OWcRjmfFRou4ZcImIE6x27YY/1lpACbddwcOGArJlYIv26o1+1GF/zZ5VS25U1IxbQQp5nS6Uolto62ziHmd/POksutYppvKXtO4oA3tL0LW3XsVqtCDGgQIyRS5cu0fc91lpyzpRSmKZaQQawzuGbhqZp6Fc91vv6HcwOxc65Oemois6Y0tm1AmV2KrbW4psqTkMI9bso5UwAl9lx+mkF68LCOZBS4qd/+qf5e3/v73FycgLAgw8+yHZxuX7G+fN/Hj71U+Hxx+F/+V/OezULCwsLn9io3hCtn/ZpcOECeH+uS7qlWUTr7cCT2gRq3I1YQ8oFY+cQGzH03Yqm7aopT5goOVTjJKOIUWxjUMmoJsQozhua1uOcQUuiaKZoqdclk0smzyK25PozmFuVoRo75SqArKlVVE0ZKbXlVVSQueMYVawYGufniJsqZkvOtT03JnKMxDBhDTgj5BhIMeBm0brf7TBAyYnt8TW0JFKK9H1LKYnt7rjO5KLshi2FQts1tZ3YGU72J2AU2zh842j7hqZrMM6gMov8xtM1DQ4Ds4twCvFMaFpjaNqWzcGG1WbFFAOhJMQ7fNcScsS4WtXMpXB04QhjDF3XgQjrzYacMynns9bfVDLW1Vbstus4ODigaRpijEwhzDmytbp9WsVV1TMH4RAC169fZ5wmnPdcunQXIlJPCMwnA05bmU9zZ+cj6fSQetLxtZRbF54rfuu3fotP//RP50u/9Ev5S3/pL/HYY48B8F3f9V38lb/yV855dXce3tcqK8C/+Tfnu5aFhYWFT3QeeqjG3Fh7I1N74cOziNZbjCpBP/JUoWrNMA05UVSJORJiwnlP1/W0bYuxds4ZTcQUqnFQY8klUEg4L3S9Z73p8Y1lShPGWcTaKoTnKqTOmbClFPKcDWqMYI3BAFoylIKoklMkTSOqpVZU57ZXay3cVPEThDK3/Bqt4ldLwYrMPsqn7cETOU5oTmhJaIqUFInDSIoTKQVSijhvEAtXr18llQimsNtvQapT8X7Y0a7a+jnMUi2VRC4ZsULTNTjvSDlScsKbOs8ax4kcI2GaiCHU+J+moW0b1ps1TdexG3bsx6HmynrLEMLcjp3PWndzzux2O/b7ff3+gFzmjFcjpBiB2i5tnZtNssxZZTvnfCZWU0oYY2jblrZtgVrJPTk5YRxHVLVWaueKbs6ZEAI557PXNHOL8dMfXM/Qgbyw8FHwlre8hde97nVcvXqVvu/Ptn/5l385DzzwwDmu7M7l1a+u1+95D3y4XwMLCwsLC88+p1XWl78cuu5813I7sOS03iac/nEhAjqb68SciDmTQ6DETGe7KiatqbOesbr+Og/iIJdAzBPet7OZUMKIzi2sE+uuJ2WLmjJnkxrE3DCHSimBShU/VEOjuWSKIvP8aYa5VbVowhrFGIuWUqumxpzdLiWfiV9rDK2zkAOlJFJMlFRzX5MAJdM6i+RMSYHVqgOqoVJC6fqOJ65fp288YRg5XB8R9oH9MBBTJDdaK6qGuUUapjDBZGjWHb71hBBJY0QnZdjtiGlirSso1bwIC843WGcIMWCktucaLNM0YHpD07VYoZolpcSVK1dIOeO8r0ZJOSG2fgZtV08uxJRw1DZJ5/1ZFdXamsd6WiU1xhBjPGv1tdae3T9tIR6nCb1+HWPr807buM08SwsyzwQvLJw//+7f/Tv+/b//9zRN86TtL3nJS/jgBz94Tqu6s3nRi8AYGEd4+OGa3bqwsLCw8Nxzc2vwwu/MUmm9RfhI1VXlpjPiwtwanMAIWQupFBAhF4iza29MkUxCnOC8wUomp4GikUIi5UCII2MYyCWAKSh5NiWq8TPeG4xlvgjGCZBAM0YUa2rCLDq71JZcxWjOpFRzYY0IzhqM1Kqsaka0gJYzseqMxVqDdQZnTY3syREtCeYKq6ZI4ywpTmhJGJSmdThvQRQ/R+YcXTii73uOLlxAjHDl6hNkVa5ceXyudAqbgwM2m001RppboI0x9F1L17ZIUYwq1kjNW3WWkhMnJ8dcu3aFooXdsGcYBqy3iJVaabUW1zUwt+amlLh+/fqZCdLB4SHrzaZG53hPO59WO62c63zbWouIzN9lxjlHv1qx3qxxvsbsIJzNuIoRur6vLciqXL16Fdc0+FkA27nqnlI6m42FWtl9SmW1Ghl/+ErswsIzSJnntj+UD3zgAxwcHJzDiu58vK/CFWq1dWFhYWHhfFhE68fGIlpvE8Rypmqt82z3+ypSjFSx5JozIWOspajSNB5rhZOT65xsr1VxKpkQ9kxxjxgFKSiFtm/YDVtCDIgB31p8Y3AerAPx0K46ikaGcUvJcW7jrQK4aG3hLSVSSuTk+lVimBApQMGYGtFDyVgrNI1DRCklIlIQLaRpJE4jYRpqWm3JSCnVlGmq+ab77TElRh5+8IO0bVNbh8lMceTSpYsgcPnyZYb9HkG4du06IsJ+PzAMNc
/VGDmrXgq1Pffk5JjtdosWxQCN9zTOkWJks9mwWq0wIhwfHxNCoGkd4oS2bcg5cXThkMMLh+z3O3zjEWNwszBdr9dcu3aNJ65eqdudo5vdhGOMVWA2zRwXJIgRQgwM41CFqxZSTkwhIMaQtXCy3Z5VwlPOpJwoKKvNGuccq9WKzWaDtZbDw0Ostez3e/q+ZxrHswrsnLlzAzPHKi0sPAd8wRd8AW9/+9vP7osI2+2Wv/7X/zpf/MVffH4Lu8N52cvq9SJaFxYWFs6P3/zNev2KV5zvOm4Xlvbg2wRlzm1SqjidW4Srha0hxQKxRuAgQrfu6+xmCohVxjCRdhFjanyNcR5rapuYioLWQfApjIxTJmmk7zdzxE1BSsSoY5r2GPFka8hpZBz2TPsdBksKteLadx04g5CIIZFMqE7CKVYjpQhpCoRpgFxQY8kiiGZKDKQ4ISmTU0S0Ct4SJ1IxeGcwJWONMJxcJ6SJrBkxMA0Dlw/uYToZ0FwI04QRw/5kixHD0dEBx9eu0zTtmdtuUalzrgVUBZWCsULb+hrLI0qIE9IYLlw4olm3hGmkc2t0FpOqelY9tdbSrVYIwsnJSW3rtZajSxeZHn2UaZpoura6+M5V0NM235RSnWe1tXLar9cwGy2pKm3b4pwjl0wIsVaJZyfgGCO7/b46R88zyKLzDKsx+DmuKM1uw0YMRT6kvr9o1YXnmLe97W184Rd+Ia961asYx5E/9af+FL/xG7/B3XffzT/5J//kvJd3x/LSl8IDD8C73gWl1P8PLCwsLCw8t/zWb9Xrl7zkXJdx27CI1tuEWavO4lWqgjUCmLm1tOCMra2dswI1psa1eG8R16Lo7EQbKTmj4hFtcNZjxSKtxUxK1DRXQDPiqnNvKVpdfsuEdQaRDJJJaaKkAhnWqw2IxXlBcsHbWRQJqFFc04IWpv2elCesE7qux4ow7neEYY+lilvV2SiJQpq3law8/shD3HXhApcuHmJEUAo5R5wzJGd5+KGHkCgcdCtW/Yr27hZNCsnQdSvG3YDFEFMgpYIaoekb1v2KyQTKWEgxIWJBhBACx8cntAcd64uHmM6yHwZSDGhWnPPQyVkuatd1nJyc0PnmLEvVD3uOLlyYc2yruA2zK7DMrcA5Z0KKVUgDKrY6KJfMbhwIIdDFjrZtSaUQc2KKEZVaebc5Y5zFesfKb6oALmAbh3UONXImjqFGFJWb/lJVPfuqztyFFxaebV7wghfwK7/yK/zIj/wI//W//le22y1/7s/9Ob72a7/2ScZMC88sL31pvf7O74Qf/mH4b/8Nlm7shYWFheeOlODUuuF0ZGPhI7OI1luAm+tdH9k1eK62VmUBxiBiEFurc63vmaYJbwRyxFgHVnDG0rqmZnqKUrhRoYVSHYVF8E5oe4tve7q+p+2qOUrOVfyqKtYoVgrWgbNK4wWMRdXQ9w05F7REYgyI5LOKphgQMlrmGVithk1aYp3BjSMxTlhXD8lSEkYVoxmyzO3CNXrHOcPR4Zo0TeQcKWXer1J4//vfz92by6x9j7M1dsZbz347sD95gq5pSTGy3+7ZTgMYYXPhANc19O0KciZaxVmPMY4wZRDFWlPzbrOyWq3YDntKTBys1iRbuHblOhoLXdNyPBxzuN5weHjI9eNj8myI1PY1v/XU5ReRs4qrmY2UjDU415CKns3adl2Hqp45Ep9uOzg4QLVWWXPOeO/P4m3MLEhPHYObprlh3OQbrLOUmwyZzo6thYXnGOccf/pP/+nzXsYnFKeiFeqZ/p//efhjf+z81rOwsLDwicYHP1j/vvZ+McT7aFlE623AzcmZAnXu0RqMGgwGsZZiIeuNSqyYKkhUC0ghxMQwDDRdS9v3+KahKIQQGcaBEhN96zEW+lVPs+pQEUqYyOVG9U1JqCaKRpCM8wbXWKw4pnFLUaFpWrRkxjERwoRvPE3TEMKIRejbFmc79icnHJ8cQ1acEfrWVXFsBS2KmGriJKJVZCXlJS96EW1jQAvHJ9eI4w6ZK6LDsOXw4JBNv6KkQt/2TNcDTduyK/u6xiHgXRWoBWFKgRILpjU454k2kJldkQXEGJA6OxpC4Hh3zKW7L1FKZsz1WzEIOUyMqWDnb2pKkX6z5trJMU3TMI4jIQQeffRRxFqapjnLUrXezTFF5qb51SrmT7NY7Sww61zuxGq1wnuPc46Tk5MzYXr6PqdVKhGp7ctzlE6MEb9ag3OYeiYBymwGpaDIIl4XnjP+4T/8hx/x51/3dV/3HK3kE4vTmdZT/t2/W0TrwsLCwnPJaWvwC1+4jGh8tCyi9Tbg1D1YmT1z5u5gmSutGINQGIeRxjdY66BrSXkipUjRyBgGpjCRNJE00aQWmduJravidz8OeGOxjcOrh5JmEVTbZZ1aUgpYJ2jylNkNuJr5KGEaaduGbtVhPYRpIoRMqf2uxKnmmTYWKAolUmJAtWDxYA1WDLhaETyNfFFVvLWEksgxEIqQyJSSmMaRvm9Imui6juawZToObPcnXL7rHsZhIg2Za09cY9Wtaduetunp1ivW5YAxRbBC0sTVJ64RTiZMNjhbsMZT0Jo3a2oL73a75ejiEev1Gs3KOAxYLG3TIEUYh4EQEk9ME4eHh9X5t++r8BwtV65c4eDoCHdaUT6dP51jazSDbSxdt6LMMUOqtbrbti1Xr17lwQcfPBOn1lpCCIQQznJdnXN47ylkVJWSM34+llJKNd/VOcypA9NcRVeVGmN02ie8sPAs85a3vOVJ92OM7Pd7mqZhtVotovVZ4tWvhj/yR+pcK9RK68LCwsLCc8f731+vX/zi813H7cQiWm8B9KZbT9UK8uQHzZ29OgsUI4Yyx8UErSY8YgySCykEUo4UTRgj9H1H0cI0jUwhYK2jbTuatsO2LSd5IqXAfqjVWWs8RSMpBXKBTbPCGHDO4BpHyZFpzIQYIBj6dVurdzlASbSNo6xqrEvRzDQNlJTQnBGtIjdrglwIOaMi+LbHUF2GDRaDkkvGOIfmzLWrT+BsHec1JMZxz2rVoqWw2Rxy18FdPP6BJ9iyhUytqMaMs242yhVKVEosNS4GwYgl5sx+NxDHwGF/gG8a2lUHUbC9Z3WwoV2t2E47QghV+EFty7UWf9M88Wa15mRfK8AXL15EBVbr9TyDDK5pav4qirUOay05Z3JOKIJRoWgi5XxmwuS9r9XYuTX48PCQvu+q0FVlmiZ2ux0xRpq2wXlHSNWkKad0lplUtNC0zZNO6ylQitYTIUuZdeE55OrVq0/Z9hu/8Rv8hb/wF/imb/qmc1jRJwbew0//NLz3vbVV+P/7/2C/h9XqvFe2sLCw8InBaaV1Ea0fPUtB+pZAq4PtfAsVUDNfqog5SyZRQA2CIGIRHOBpfEfbVGdbJTMNJ4Rpi2qNhGm7lvXBhn69oetq3Ip1FjWzMZMkus4jJjKFLVM4QWXEuoz1CWXA+kzTFbrewsrStA5r6yzpNA10BytCiTx05WE+8PhDnMQ90hjEwrTbk8dAYx3r9YrN4QHdqsd6j2s96/WK9XpN0UgmkDSQJJIIZCKFC
CVy6WgDIfD4gw9y/bHHGE62eDFoypSUGaY94zQyTRMPPfQQ0zDQ+4bn3/s87rp4kZwSYZpqBVILMQeyKRxcPODFL3sxly5fIoSJkhKUTMmJFAMWoWscXduAKtM4EsOENwbfuPp6JbFadxwerehXHhHFOcPDD32Qq1efYBxH2rZl3fd11rTUtl1vLCUE4jBQQsS7OmscY6CUXLVuzqT5/Y4ONvRNU4chcsYKc1UaVqsVdz/veah3ZCsIAqkguQAQBKT1YOyNSuusU6XofFtvCgZeWHhuefnLX87f+Bt/4ylV2IVnnpe8BF7wgmoI8v/+v+e9moWFhYVPHBbR+rGziNZz51QcFJQEFG6UUx0UQWrhEwMIFpyncT2CZ7/PhL0y7ia8sXRdA0REIsN0wjCdYL0Ba4gFsA1Nv6Zbb+hWK3zrsB6MyaS85+Bij/OZYbzKbnyUUK7Q9BPH2w8ypUfxXQCzZfvYb/PQQ+8n5ch63dP2HUPcsr7ngPXzL7B+/kXyyjFJwVqLF0tvWzrbEMbAbhhIAsUZXNNydOGIftWSmAiMZB+wa2Ebr7M68myOGlLZc3LlMYbrT3DPasW6CGsc8dqOi/0RaYg89uijGAelRO6+dJG2cQzbLdPJjs56Nl3H1Sce47HHHgabkRYGHbgar7OTAXVK4ywO5fjqVcbtCY0qh13H7to18jiyvXoVp8q9d9/Fbr+lUHjefc9jF/YkMiHtmIZjprDl+OQK63XLNO555KEHObl+jXFfhXTfeFojEAN5GGgALxktCecdq3XPZt2z7jucEeIwMJwcE7ZbxpMt+5MTTq5fY3t8nfWq5frxNTKJk2mLubAmdw7nHJePLuKjcuXxq4yNJW56cB7BAAa1gm8txltEZRGsC+eOc44HH3zwvJdxxyMCn/u59fbSIrywsLDw3LGI1o+dpT34lqDMl5uRj3DPYK1D1SLqwLpakbNmjr1JlBSwhjrfKnOLrbXzPGx1HBZRkASSKSUwpZG2WMQknIWUR1zToCTaHoxPaDEY29CsDH3wyJxtioXt/oTolV2JDDFSYsEnwZsebywRgVxoVh20jqiZTCGPE8fXr2Mk4ppaZTYFMoFiM0MekDES0wBpZNV4DruW43GkjIE0TrR9T993mGgYr+3Jkrl6/Qmed3gPvl0xHQ9sj68R8+wE3DpUCqFEaAzdQYMxlnwyMsSJEgK2sVw42NC0nkcefgjTWFrvaFrPOA0g0LYNu90OgG7Vs91vuXC0Zn24JoQ6Z2ttwziMHB0dznpQeeKJJ2h9bXmexgHnLFaVkgv73Rbalrbr8Y2HrJSUiVMgTYGSMuJLPeNkpOa3tp4iYK1BnGGbBkbNrIyFXCiz2VJxDr9Zn1kFn54iWTJaF86DH//xH3/SfVXloYce4m//7b/NZ3/2Z5/Tqj6xeOMb4Z/8k2rGtLCwsLDw7BMC/Nqv1duLaP3oWUTrrc5p6+ZNo60COGuZhGpelC1KnUm0xpJLpGjGWltzVtFqmCRa52CNqT+z9Y/EXAo5ZYb9yGa1QnB0XcPx9iqCJZfCZrOpeaIlg4Wm69DeMO0zpWSsg027ol+tkTASY2Q77slB2RHxAVQzpdR2YlHIOSFGESuUlGmsRWyDl0zOES0FY4RpHIhTJkwTfePJubbOxhjmfSvkknDW061bxu3Aet3z6BOPkA4ucbA+RIMyToHVusevW5pVxxQju2mHxeNSg7WQciTnhBWHcZam77DO8vj1qxzddQRG6doVYxjo+xaxR7zvfe/nZH/CxYt3kUOkbVv6vuexx56gaVtEHHm/p18dsN/vcc4TwjFt40glM0wjHS1R65xtMAZnDG3bIQgxBeIY6mwqVCdl6vdmRKqLsG/I1DZh5xy77Un9nG1LyImQaws61nBw4WI18Hq6RouPJntpYeEZ4su+7MuedF9EuHz5Mn/4D/9h3va2t53Poj7BeOMb6/W///f1D6mmOd/1LCwsLNzpvP3tNfLm8mX4/b//vFdz+7CI1tsEY+auTQGMxTZdnWk1Dus92vgqKEuae4kFUYNogVIw1mIUpBTEKsaAMULBkksg5cw4BmLMGONomo6cFRFDKZmuX5FCJuVIcgnrLbaBvA+knBAEbzokJ4xmeu+Rvme18vTRggSGEAhTQDqH7SCTsI2tFeEx4JypWa5icN4Rc8QYIQwDcTdhNHJ4eBcPX3s/++2EluqEq5oZh4GuB0eDayzONdjWcjyc0PmeIU2sDzaIWHIcyVoQZ+j6HtNaQozE3Y5xHGhXDb3viRqJKVKc0q4amtZzvDtmdbTBqCfmemLAWksppcbM9C3NPOPa9X11TiZhxJFSIueMc45Lly7VqmlRmqYl50yIE7Fk2vWG1nlEIU6BaZjIKWGNoe1rZmvKmZAimmKN6CkZEaGI1BMa01TdhBuIKRO1tmJjLRcvXbxxXMFSZV04N0r50A6TheeaV74S7r4bHn8c/vN/hj/wB857RQsLCwt3LuMI3/Ed9fb/9r/BwcH5rud2YplpvYX4nYpbudx4kPctiMU4j3MNTduTShUyiOC8xzlLzqXG2lgHUv9IjDERY0JLQebWZFUl50yKBWMd1jUYcYhYkhacc/XnKZFyhDQS8kQsA0lHUhmxJDRMlDDRCBz0HRcvXMB7IU57wrSn5AljMm1jWK1bur4ByWyHY06214hxIoYBawTVakKUS6SUiDFgrTCFgf1+i2rGesMQBq5vr1PInGyPmdLEmAL9wYopBXZhx9XrV9jHkZNxy7XdCcM00vYdz7/vedz7/HvPck1907A5OqRZNeSSyZpJmjg43GC8oZBJGglp5AMP/TaPPP4whxcOuHjXBXJJ9fPNmWvHx2AN22HPMIz0fU/OmabpiDFx6dIlpmkipcTh0RG+bfBNQ9d1tG2Hdx60VqWNqRmsTdtivUesBRFSyuz3e1KMhBAopZxVYzXXarYYQ8q5tg47h/GOg6OLIPZ3fyAuLCzcMYjA53xOvf0Lv3C+a1lYWFi403nve2G7rWL1z/yZ817N7cVSab0lMLNO+PAlrxpLUjiVGq5xNbfVOJxpkZQYVNFSahyOOT0jUUANTgTFMKVEKYlSHCIN1ik6h8Aa40ip0K16UKFpOgqGnJSiUttLTZ1hndLAFAeKZKxVSELOEzpmUghI01Cyok3HuN9x7doV1l3HanOA6T0qmVQiMY6M0wlj2JJyoSRHypHV4SEiWi9GaVpPizDGEecM2jqsN4iBEEeGEBAnbKctmpQyDkgDpljUwZBGwtXE3ZfvoXcrjHdgajuiGMEYoetbnOkwexhOdhQp2M6DV7JkYowY77h2/TpiDTEHtrsdly5dwtuG7e6YzWbDdj+y3e7puhXjMNHYhqPDnpSgcZ6Tky0HBwfsdju89/R9zZk4ODggqzLEQEoR71uctRiRKkClZsWWUmONSookzTjX1uMDJYeJLkZa5zHGYIwhl4y0HtevCM7DZl1dhW9Sp0/RqYtwXXiWuP/++z/qx373d3/3s7iShVM+8zPh//6/4R3vOO+VLCwsLNzZvOc99fqlL31S+uDCR8EiWs+d
OnP61NtPfVjWgp/vGNsAFoxHUIoxtSVYLFkzkmoV0ntPzhkoGFFyTEwxYrMDozRiUC0gQuNrS7C1npwV33SoCqpCyhmMwXc1EzalyJQmjDN45yhj4srjj2GNZ9KCW3eUDAf9GmcNB5sVm/WKftMzkNnHPYHEEPaENCBWiXHieLcjhkC3WSEGkILxlt47GoS8D6w2KyaFEhO2MazNmqCFMY10Bz1OLdurJwzHIwerA1zbcHT3BXbX9sSSa8xO1zDFifFaQFpDIGHEYJ1jyiNTTvhVi3gDRtkOe4ooxhumaWLVbbjneffy6KOPsR22HK4PyZqw3iE5sdqsSalg5t9I0zThrCWHhBTliUcfI6VM368IMWK9Z7NZMYaJ7TCgBRrfIiJnbcUignN11janhDiDo2FzcICqEnJiPw6M+4GubVCtM8xJC75pcauOyXtoO8CgmDlC6ebjbGnXXHh2+S//5b98VI9bMoOfOz75k+v1O995vutYWFhYuNN597vr9ctedr7ruB1ZROstQxWsZ3+mPY121bMoEoNYSxGHahWfpdQg11pZC2hO+NbjvSPnRMkFL0BO5BBRTeTGUHyDSjVxcs5RtJwZLjnfkDVTVIi5YK3HOYO1nhhHihR842gaS8yJxgvOCKoGVWUaB8q0Jcc9IpmcRkKEZBW84r0lFiG5QnGZKY8wTozjxGVNtVRsCtZA03maIqQYaVctYdgTp8Dh6oh1t2FXJk6GHb/n8u+h8x3WWK5ffT+lU9TBPc+/h+vtCe9+9/vo+5577ns+znlSrGI8S6YYQ54i+3GPWuhXHYmMGBjGETVKYxsOLhyAMRxePGC737Hf7zm4UE2WYpxYr9e0bc/DDz6C9x4K7E62bPoDjo+POdgc8MQTT9A4T9u2jPuRdtXiXEMexip2Xa2pp5TY7/dM00TXdbVF2FnCNoIxNG3L+vCAlBJlHEi7LSFMdP3BmdDNAt5bpG0pxoJr4CYjppulgcIsXGWOxFmEw8Izy7/9t//2vJew8CG84hX1ehGtCwsLC88uN1daFz42FtF6C/FkeTCXwKRK2Vig6RyQ6wO7NUUNU8oYiSgwxsDBpsHgcUaYphHNCSsGQeosJLVy553HOUfKiXHaIQp93+KsZxxHoOBah/eezWbDftzRNQ3ONYzjRLeuhkB923H16nXSMGJKYdjtKd6zXrc8/+ge4jCx3V9nuH6MCFySu1hfvoi0ypAnxGaMK7jOcNBeYPfEdfbjjjEMRK2ia7Xp0VyYQqTkQNDI5sKGkCaO98dc6u9mc/GAgnLl5Ap3XbyL7rDn4r2XsNHRHa6Y9onHT56g3bRsDg9wrSOTGcJIyeB7T2Md7bpl06/JsaCi2NbS9i1+25DJmMZWw6NS2A5bXvDiF3D16lUKpZ4kcJ6HHnqIg4Mjrly9wuWLl7mwOWLaT6yaNdeeuMKjDz/K4eERuSTG3Z6Do0OuX73GE088QQiBrl9xsNnQti273Y62aVitVuRSiDlRDKwvHBFzqhXWFHni8ccZpwnfNHVeViPbkx13HRyxG/ZYhSkmXvyyl51VWuthJk8q9JcPORYXybqwcOfz8pfX68cfhytX4NKl813PwsLCwp3KIlp/9yyi9RbhyROGp3f0Q7YKWntma9aqc0i2tc1TpDbHG0FUamudUKNwVNGcISaMajVfqsGdc0TK3G5qBDGKoqiU2jaMmZcg6PyaBSiqYAGjZ0U76yA7qfOfaWCYlGkY2O+P2Q8ndK2nSKSY6ngb8kiRSNNbtCQ0JlaHK072x4gXjNaZTPE3jKUeP75G44XWtXRTR0yZZBLNqqkxPAFyUZJkDu46wkzCFEemMNKsWw6OLtB1PQWlaKmzwaa6KWdNhCSsmjVt4xmmkTFOSCN0m54pTaQSEYSkiTBMiDFkTfSrnnE/cP3kuLbzxkJrWygQx0iaEvtpS5kiw8kOyUrTtkSNuP0eaywxhDpji5BDJALo7OCspepLARVBjWDEEmNit9+DCF3f47xn2A/YzpFj5NFHH0WMIwM0DZtLl8DYDz3azg41pVCoX+0iWBeeC/7Tf/pP/LN/9s94//vfTwjhST/70R/90XNa1ScWmw3cd1+NYHjXuxYH4YWFhYVni1PRurQHf+wsI8C3AFWwfmSJ8NSfGnzTIsZTZ1sNOEHn3FM9mwerFj05BcI0AopBKDmhFLwzeO+qcZMpQEY1o1putCPfJIhLfWuSFopIjVtx1YxWrSLOgINiqqhtu4bN0YaLdx+xOVpTSOzHE1IesDZjbEFsAZspkhELhcIYq0BMlFp1LZFilSEM7OPAPo4kMlmUoUwEComC620VxrbQrD3dpq0CuLVsLhzSbjqMN1V8GqHrOnxT23TX6xXtPMeqFrLJJBKRDA5sa8+ieGzr6VZdNaUKA6lEjBesNdxzzz319VYrckyEcaIxDo2J3nUcrQ5Z+ZYLmwMOujWiyqpfIcbUhlxVpmFkt92h84mFUnT+HmT+HrTOywqc7LaIMaxWK1Z9z26/q99xyjzyyCPYxlOMoMZw173Pq0eTmBuHx02t6PWuLik4C88JP/IjP8Ib3vAG3vGOd/Av/sW/IMbIr/3ar/EzP/MzHB0dnffyPqFY5loXFhYWnl1Ul0rrx8MiWm8JTgXmh84Q6lMeojfd77oVxjrU1BlX41wVPtZU193Tp6mSYiKECUExMs/HqmKdxTp75qBr5uBOVa1uwUKt3ppazc1axVMqGaw8KUpFnMW2Btc61IKajGmF/qDj4GjN4YUNmcjV4yeY0kjTW4xTYhpQiRgPmYg6qoBtBNdaIpExBaIm1kcbfO9JZEzn6C+sadYttrMEDVzbHpOlYJwQc2AqkXbdc3jXBdaH6xphQwZraPuWtqtGVpSM9462a1EL+zASS0IN2KbG6qiBZt1gG0vTOJq+oelbjDMginOOzWbD0dEROSbiFHn8oUd49MFH2J9s2V7bojHhxND6DqPQWo/GgqaCqJBiIsVInCamYQQFa2ulFQGRevIgl0JCUTObZFmDClhr63NE0FIIISDWgLdMJeHvugtOK+hPcxieathFtC48F/yv/+v/yt/8m3+Tn/iJn6BpGr7ne76HX//1X+ervuqreNGLXnTey/uE4nSu9V3vOt91LCwsLNypPPwwDEOtAy3/i/vYWUTrOfP0oSNPIx3k5i0GVOi6FWI9GINYg3EOjGDs3CZ8+u2qojmRYkBLwYhg5IYKrgW8Gi0jpgAJPa24zg8Qa8DYugZjSVkx4lAE46tota3H9U1tTTWFfRjYjzv20579tEelVlOnOJLSBPN7IgmRjLGKOGF9uKI/WLE+2nBw4YBm3WEbi5pCu+nx6xYaqQKycwRNTBoZ08QQdxQLeGHKgSGNFFuwja9OwN6CNbjG4VqP9TW/dpxGrl+7Vs2UciKU2sIcS0KcEHLAOAErrA9WpBzZ7bcgSt/3+MbV/ZF6goCi9L4lxcjJlWtcf+IqJ9euo7mQx4nGWEpINNbTOo8Xw8XDIzarDaJCyfnsu7LG1jbpudpdTx7U+VYErHcYY5hCIGmh73ssghXDerNBjEF
8QxYDmw1S9KkH1bxJl57gheeQd7/73fyxP/bHAGiaht1uh4jwl//yX+bv//2/f86r+8TitNL6K79yvutYWFhYuBOZJvjGb6y3X/ISaJpzXc5tySJabzWeUuK6yZDpZkFRFNv1GOvAWMSeVkwNxtp5plWQOUVHVSmltvwaY7DOgkDKmZyrY7CxCqaAFAq5ClfRs7xPYwxZFTGmVlqNUBScq+JPvND0Lc26w7ce8RZxFnHmTPgpim8sSSPDuKOQ8I3DOkMuiaSZpq8tvFiDNBbjLGoLQ5wIJWC9qyZRXUvUxLXdMWMciZpo1z2YOpNrvEGlsN1vOR5OapuvhZACMSdiDOgs8rxzjOPIdnvCFEawoKKMYSSVjPEWTBW3mUzIgZQTRROxRMZpJKaIsYbrV67ireOuS5e4+8JdWDEM2x1pnLAIrfOsmo7OeVrvaX2DEcOq6/HWzWLV0DYeawwigve+nowQOdOYKSVSzjRNc2YOlXOmbeprWIGDzaZ+jtZw4fLd4ByI+YiV1CX0ZuG54uLFi5ycnABw33338au/+qsAXJtPIC08d3ze59Xrn/mZWglYWFhYWHjm+Mf/GP6v/wu8h+/6rvNeze3JIlpvIZ6+yKWUp/2BIk1TBavYG23BhlqN48acrFLqKCQCpbYHe2cRgRgnYgyIUYyplUIo81yrorOEMabmepbaL1wrrXPl1ViHsXMFs3O0645m3dKsWpq+pV21+M6TNIEpuMZSNBNSQDVXrydRMEoqGetr1mzNcJ0oUiiijHHCOoNrLLaxdJuOblXbc33naVYNXd8ioqgWXOMxzjLGiTFMtVoMTDGgFGKKxBho24au63DGkEuuQjUlUo6IUUIKWGuYwkSIUxWn3tJ0DYoyTRMx1xlZ51ytFmVFENqmxTtHjomSMuN+xCiE/YAVy7QfGXZ7xt2e/XbH9voJcQo0ztF3q1phFfC+wVqLChQUsfXEwTCNYAQVaOZsVi1lNnViNtpyZFVe8cmvqGV1V0/vlXpwPIlFsC48F5yK0ze+8Y38m3/zbwD4yq/8St7ylrfwDd/wDXzN13wNf+SP/JHzXOInHJ/xGfDCF1bB+jM/c96rWVhYWLiz+OVfrtff+I3wJ//kuS7ltmURrbcSCnM46ZMup7OpN7cSqxrUeIo0ZPEgdS4VAWNueqTMzxSDYKojsFCrsqLEFMg5gujsTKv1ouVGEopUN2Jzah0sUsXR7CYsphoXqQdag20EY6VWIdPIMO3YDseENKKSUU2gCWsVkUyIA1Eirnf4laU/7JBWyJpQUVzj6FYtrnGzeI2cDHtiLvimqcLQesiCFq1pt3MbrRow3tY4mq7BdQ7fOdaHa4w3DGFiSpGYM5m67yknhjAwppF21VE0UzSzG7aIE3zb0nbdLBwTxhnWBxu6dU/SxPpwjW890zQgImzWBxxuDln3B0gSclS210+w4olTJMVcZ1E5dWSuLb9t31ZBCjjvquCc55HNvH0KgTJ/F916DUYIMRJCIBeYSqZYT8Bw8eWvpJp22Rvi1Dz5eg5HeqaP7IWFJ/HqV7+a17/+9Xz6p386X/mVXwnAt3zLt3D//ffzyCOP8BVf8RX8wA/8wDmv8hMLEfgf/8d6+8d+7HzXsrCwsHCncWpy98pXnu86bmcW0XorcDZTeLMh03yRWjE1gKVgKKhCwQEd7dHzGEpDKIaclJwKKRVU6oyrsQ4Ri7Ee37W0qyq2QpooGmlbi/OQysSUJ2Kppj5N29R5T6pobXyLEUFzxgg4ZxinERVlP+2hMcjGE5pMlAAm460iRELcMYQtQ9wxTCeEaUtJAxoHch5Ql2AFZQ26htRkaJSmdxSt68w50bQNbd8x5czJbmAYIuMQ6dya4drA7uoOyQZnPSEm9sOA7xou3H2R9YU1ft1AZ/BHPawcZt1AZzkJe3ZpIjuDX7Uc3XXI+mgFDpJk2nWH+FrNtb5WecUKU5hQI6w2a3bjQMqZqURMZ3lie4UPPvoQ0jicb7l44W4sjt5vmPaJaSzkpBjjWK3XJM24znF41xHZwURGvcF1nmJgN+6Ywshue8LxyXVKzjhnsLa2VQ9hIOYwOyqDOKmTyW3PE7HQX74Pc3A3Ih251PbgLFAMs2lWPfIsBofBLL8aFp5Ffu7nfo5P/dRP5a1vfSuvfOUr+fqv/3p+8Rd/kb/6V/8qP/7jP87b3vY2Ll68eN7L/ITjS7+0Xv/AD8Df+Tvnu5aFhYWFO4lTk7tT/4CFj53lL9NbAAHkScL1povKh24BQMUCHtsdkkxHos6blqLo/DqKoaiQVEmlRtSoCEVra2wqCbGKsUrWSKaQpaBGEGcxVm5UWcXMVdu6UGOglIhKIeVQZ0BbIdhM0IBIxNq55dgUxBUwCSVhjdI6Q2Opj2mU4pVoI9lnogmoTYgv1U1YIzlHRKjtw85wdHSh5o9GZdpNSDL0rsdgEQwlKyFFxjAxpYkhjRyP1ylOsb0jWwUvuL5BWoc0HtM2uL6lP1jRb3qwUqN3yNWReG5bDjkwpQnTOLCCbT1iYDcOTHHC9Z5gIskUXOu5dNclDg4uYE2LEYczLahltxsZp0AuhawFtWBaS3tQzaai1BifYpRYMqnk+r2liGqdQ/aNR6xBUYY4kUUxjUG8oVhhnxWzOeLuF74Mmg2Irw7EZ8cRFFPOKu1WBavmpvr+wsIzz+d+7ufygz/4gzz00EN87/d+L+973/v4Q3/oD/GKV7yC7/qu7+Lhhx8+7yV+QvIFXwB//s/XGO9v/EZ45JHzXtHCwsLC7c84wvveV2+fOrUvfOwsovU24cnC9aZKrPE436GYJ7fzzi3CuRRiSoQUiSkQUyLnjBal5EzOmaKlCpm5/fRJmvnMwKnMQokb+a1QtxlBKehsgFSoIgwK1gq+9XR9i288vnV0fcOqb+n6lqb1WGcplJrFKgmVSCFVt2FiNYSaL0UT1lmOLl7EOov3DTFEurZnvdqgWs2ljK1zvrthz/WTY3bDnu1+O69RSSWRKIhzNH1Xo2u8rRVHI3VGVyCmQIgBMbVFV6XGzYQU8Y1nGEemaaLt2ho9I4I4g/HVGGuaRrr1miJKzIn9ONB2Hb5p68mF2UDJNw3GVdOp9WaNcY6T3Zbj7Ql5/nnXd6w3G9arNTrntLZdOxtrOWJKGGsx3pMRjPdMqdAfHHHfy14OvmXOzeFUkH6oT7VwI3hpkawLzzbr9Zo3velN/NzP/Rzvete7+Mqv/Eq+7/u+jxe96EX88T/+x897eZ9wiMDf//vw2tdW4fqv/tV5r2hhYWHh9ufd764ZrUdHcM89572a25dFtN4qfBQqQbVWUU+rn0B1mW07SoGYcq22AjrPc9Z0HKVoJpd8U95nFSo5F1DBWTeb/shN71eqGJ0Fa62wCqVkxIAYKCXPM7TlTAY9yanYCk3jabuepm3xTYNva5utzHOn1R+qUEqtxCoRlQBEINdtUmN4ZBZrxgqIYqzBNR7fOMQIYRaZ1hnarqn7aqHtWtquJWsi5mqclHIiz7E+On/+KWWmaUJRrDOIEcQKXdez2mzwTYNS53iLFoZxYJ
wmxJj6+NkgyRhDyonrJ9fZDVu2+y3b/clsCjVSqFmyTdMQSyKVxBQmUk5nubkhRsZpIuVcc3RnUyXjLKkUrHd0fV+NmYxQUJx3FIQpZWzbY9sOjGXzohchzgIQY2Ke/EXm720RqAvnzSd90ifx1/7aX+Nbv/VbOTg44F8tiulcEIHT8wU/8RPnu5aFhYWFO4HTedZXzH6YC787FtF6G1ErnMrNpVBrLW3XURBiLoRcSFrbPsUarLf4xuF8bfe1VrDO1GxPMWgBEYNzvrYBz/mu9R9VNWQqJVNKRrXMojUhcyZpFbA6Z7oWlBuVWaRWYY2zuMbVeVBfhWDKiTGM1TAo19cuOgtWIhBQQr2vtfJaJBFSrXrGFBArjHHENY5MbXENMRBTROa53KZr6FY966MVm6M1mUwuqY4L29NW6USehaZqbSsWga7v6FYdTVMFb9u2IDCFCWstOWecd7jGU7TQti0xRUrJrDbr6nLsLdthS9RIyBHXOooU1IDvPK7zs/lVrYojQprbhWsuaxXBIUZiSrVqHgMpJZxz+MaTS6GoYq3FOkfIiako0vaod/SHR7A5oO60oRR9klD90OuFheean//5n+fP/tk/y/Oe9zy+6Zu+iT/xJ/4Ev/iLv/gxv873fd/38ZKXvISu63j961/Pf/yP//HDPvZHf/RHed3rXseFCxdYr9e85jWv4Yd/+Ic/nt24Y/iSL6nXP/VTsN2e71oWFhYWbneWedZnBnfeC1j46DitXto5tuXUFdgaR9f17F2DmiqIMlWAOCMY7/Dqq9tsAesNzrhaYZtbU41Q22mzzJE3cNo0WiusMPey1opeSoAiclppre29pVQRCzdmJFX0rF24SEHQWg1OmZQSxRSKnUWv1tedp2YplOp4XN8NUSHkhDOOME1YcUwx0DU9cQpIThRVnK1xPmJqa7L3DnGCSnXnNWJpXIPBklMhxlzjYZylqMEYwXpHv1ohTmazo4KURIgTuSTatmWcAn3f4V1tDe66nu12R9HEer0hHK5p1g3TtQnftZjW4HtPYxtiTmQpTHEilUJjPJj6+Q7DiBoLxmCkfiI5Jewcd1NUoWQAis73UZxtKKpMMVKcR41jSoWXfsorIRfwgpaC9+7sO77RJHzKIl0XnhsefPBBfuiHfogf+qEf4jd/8zd5wxvewN/6W3+Lr/qqr2K9Xn/Mr/dP/+k/5f777+f7v//7ef3rX8/b3/52vvALv5B3vvOd3PM0/ViXLl3iW77lW/iUT/kUmqbhX/7Lf8mb3vQm7rnnHr7wC7/wmdjF25bXvAZe/GL4rd+q+a0/8zNweHjeq1pYWFi4Pfnv/71eL6L142OptN7q3KQhSincXBerfksG33R0qwOafoNpWtRYMkKmusKKsxhvMa621Vpn5oppIc4VPC219fes9VhO25FrZmutghaMmauroojU6qgYBS2kXKuMKuXUQ4pMIebEFCfGGAglVcOn03ZltD6WTNE4V1oDhUg5rbQSUa0twuKUWAL7sGOYhlpR7VvEWfZhxDhXo3FUyZrmaqcQ88SURmxj8J3DtRbbGKw38+dhwJxWoS0qinG1chlzZDfsGMeBmBJt14HoXN2VG1FDzrA+WJNKJmmi6RuaVQOO6j7ce7JkmnVDkkzUxMl+y5QD8ydB1ir2a4Xa4Jq5HZjMlAKppGqeNOfW7sc9xpmzz3sII1EV9Q17VfzBIfd9xmugVGGqpdz4ns9ahG9Mter8Oh8S37qw8IzyRV/0Rbz4xS/me7/3e/nyL/9y3vGOd/ALv/ALvOlNb/pdCVaA7/7u7+YbvuEbeNOb3sSrXvUqvv/7v5/VasUP/uAPPu3jP+/zPo8v//Iv55WvfCUve9nLeMtb3sKrX/1qfuEXfuHj2bU7AhH4x/8Y7roLfumX4Ed+5LxXtLCwsHD78ku/VK8/8zPPdx23O4tovU04nWc95Wz2VEFcw2pzgX5zEd9uwDYkFWJRUlGKllplPL3MYjSlxDRNhCmQYuJsxFQ+9H3LmXg1pgrM+lp13lREUQolpypaZyGK0TO33TGOjGEk5kgRRazB+CoQjTVzq3ECIkiqF9KNeVZJlNmMKZNxjSOkgG0sWRO2cRQD1hp84+fqbqbpG3zvUVPIkjGNoLPwjTlQqPO5hVJbi3Mia2EMdTa2ZraODLNgNXPbcYgR3zR1P063pUC/XuG7ps6tGmWMe9Qp2WS6g45kEtkUiim06xbxBtM4ikAxgljDar2mW/eIraZOtnG1LTnV9RlXhfJu2HOy32KbWknOmtiPe7LAJIZtLjz/974U7rkXnK/VVmPrcTQfTPOpj6W+uvCc4r3nn//zf84HPvABvuu7votP/jhPP4cQ+KVf+iU+//M//2ybMYbP//zP5z/8h//wOz5fVXnggQd45zvfyRvf+MaPay13Cm94A3zN19Tbp66XCwsLCwsfG9stvOMd9fZrX3u+a7ndWdqDbwVOk2SebvtNGGNPbwHlpgdZfH9I219kIpFCBvGoBoyCvckVOM2irKbXaHURLlrnMw1g5mZckTpDK6DlRsxNjZ4ptTJJNfLJOYDkOsM6exuLmeN3KLXyWntbKVLde62zOG/BKkkjMTML13g2UyuiMMf0VIGsNRqmCJvNASlssVbYT3us8bRdg2TFnTrpOqkztHbOHdVc9ytByhEtgmQDaqroLhmMUtTVKmQNx63CXepn0fUdxlYTpr7t8b7BO49m2O+3qAqrzYqJmmE7DKH+KxPBrhyYxD7tybZWWtUJxSghR0oC55s681tqZbW1LU3TYiYDeZ6D9Z5Ycp1xzRkplqw1nzflasC0L4U9lpf+vteCMeAayHWON6tiRM5agxfBuvBc8+M//uPP6Os9/vjj5Jy59957n7T93nvv5dd//dc/7POuX7/OfffdxzTVOfW/83f+Dn/0j/7RD/v4aZqYpuns/vHxMVC7YGonzMfPqYndM/V6Hw/33Qdg+O3frr/Pn2lupX19tln29c5k2dc7k2dyX//zfwZVw333Kffco9xqH995f68fy/suovVW4XdQDsbMLayzWBXMTRVRg/gNrrvAsN8S8w5vWoQIGkEVI0rRRAgBMDjnsMbixGFdbRfNJZ29l4iCqTOQpw7CVgzjtEeMYp1UJ2NTmMIe5z3WVAlkVWo7qhbECe2qo2kccWqIqeaeemtp+7ZWQEOtYFpnCCGC1n1VkbkNmbP5WN96hm0ga8G1Du8aUs6EMNG6hpxCbdU1gm9bMpkQAs55ErU6amYzqDyWan5EnentVx4xgm0cTecxtroRD2Gc26CFpmsRYHWwQopgnQWRWkFGOT6+xj133cPBhQPGk5GSlKgBJ44pj9XN2Bjag5aru+tEEmIttnEY7xBjiDkSSqIwi30r5Fo7xjhLLAkFXOspUTnZHteIHiM0TUsCjmOku/seXnAqWsVA4yh6+v3WEw6VU3Ovp7u3sHDncnBwwC//8i+z3W554IEHuP/++3npS1/K533e5z3t49/61rfy7d/+7U/Z/thjjzGO4zOyplIK169fr+7r5nyboQ4PO+AC731v4NFHr
z7jr38r7euzzbKvdybLvt6ZPJP7+rM/uwIO+bRPm3j00WvPyPqeSc77ez05OfmoH7uI1jsCC7an6e+iyBUwPSlPtK7Dmkwcj2lEsM6RYs1pjanUqBQBZ2qUSso1CuVGDus853jTbOsNg6ZU78/mSkih5NqGbLAYIxhsNf8xFoollUjJNVG2yNy2PDsOq8wtz7Ni0vk/RKuLrjVYnZ1vrVDIWG9JJSPeoCmChUxmP+xwzuG8I2tmKomUE663iAqaFbEgTjBlTr6VavSEQNZMzGCQGk1jDSK1BbdQ0JRp2xbBojmjAl3XkHNm2O8pUluv9+MeVaVdtfSuAw+7YaB1mYPVEZYaU+S7hn7VEykM00SWQpE6i4wRYqyC2HhHoTDFwDCNtH1P1oLxDtUqvH3Xss+F62Pgf/j9nwVtX1uC58/2dCr6I1VZF9G6cLtx9913Y63lkUceedL2Rx55hOc973kf9nnGGD7pkz4JgNe85jW84x3v4K1vfeuHFa3f/M3fzP333392//j4mBe+8IVcvnyZw2fIqaiUapJ3+fLlc//D8FWvqtePPto8rZnVx8uttK/PNsu+3pks+3pn8kzu67veVf+iesMbnp3fox8v5/29dl33UT92Ea23HU93QFmgxTYXaFd3keJASnu8ZKzxqBiSZlprEWsoOaO5us9WcyZb22bPBOmp//Dp6GOZhWw5i7fJuaAkICOmSiFra9XRIBids2DL6QwtVJtiUCk3InLm/NdaNZbqHKUyv68ABmMs4gyIJaaC85ZMpmk7pjHhWsc4jogTDg8PieOEbTwhB4ZpIOaIGuHihYvkBCUoeMGoIPP7SBYoUtuXSyHEUgWtoe6XtYits6+nETXWmvozNeSUKVrzUscwsN8OXL9yHYfj4voizaqhXXdMMWKtpVk1pKL4COuDDd26ZzcN7ONEKhm1gvMeRBhjqO/nPVmVVOoJBxWIKeEaT84Z6x1t35F3EbNa8amvfwM0TT1mxFGoovW0ybzy4Vv+FvG6cLvQNA2vfe1reeCBB/iyL/syoP6P+IEHHuDNb37zR/06pZQntf9+KG07R199CDc6YZ4ZavzYM/uavxte9KJ6/YEPnJr0PfPvcavs63PBsq93Jsu+3pk8U/v6y79cr1/7WsOt+rGd5/f6sbznIlpvCX6nWaHf6S8FA9oi9oCDC/cxjVu07Mk5IMZgXYPGobbCmjnCJdczK87U1y4pV2Gqc3UTebIJU631VSGqhZTLHFFTZiOmjHMOFQulthWXVMg5kUKkpETShBrmil81SppdgaqeNQZj3VxxNagYFDM79Npa7TQJ62qt0DWeaUpYb8EoxgqdbzFi6PqWqydXGcJAkULJEHLEiAErSKkmRxTQMr/f/FWoKiWXKvCMRaxgnFCk1J8bGMNI10DTrSHDdrtjHEds4xnHkRBHkkasCGoyxRSySWTJrC9cwnUNaZxwxtH0DcZbCDVqJ6bqJWysRQVCijXKx9XZVbHCqt1gbHVM9taiWhBryKpk4KWv/FS6F74YxM6f4Tyjm2U+oj70mCswxwudHpGLYF24nbj//vv5+q//el73utfxWZ/1Wbz97W9nt9vxpje9CYCv+7qv47777uOtb30rUFt9X/e61/Gyl72MaZr41//6X/PDP/zD/N2/+3fPczduKX7P76nXwwBXr8KlS+e7noWFhYXbiRDgne+st1/96vNdy53AIlpvGT6ccP0opIMaVA3ICr+6m/XRdXZlSxiOIQutbSg5klLAiKkROLNYtVZQLcQUUZurG7HIXAU9bQ0+Fa86x+EoKdYoGmvnbSgxRSADBkp1A84lkTVTNFchdFN2ayHPdeMqkq2xeNcgtSyLqEHLbMSEqY+xFrWKAYyr6xcLWMU4Ybvb0tgW33istVhncN4SUmYMA8439TMwDszcemzqmlTn3NMb/bNn+1/KvFsoKWdkfmyMkRxybeGd52PVwmq9xhuPx9PalkJmzBNTGVkfrgkhMcQRY12tpMbAGKZ6xmn+/Gub9I2TBqfmWdUUa25V1nIWuZM0c/X4GsVveN0b3gDWgVgwnhgLpjU3Feq1CtcPOex0UaoLtylf/dVfzWOPPca3fdu38fDDD/Oa17yGn/zJnzwzZ3r/+9//pDO6u92Ov/gX/yIf+MAH6PueT/mUT+Ef/aN/xFd/9Vef1y7ccnQdXL4Mjz0Gn/3Z8B3fAX/yT573qhYWFhZuD971Lkip5ly/4AXnvZrbn0W03iHkWGcaxbQcHt5DGh4lhw7NOwp2nmedwBjkppZcMbUKV6uwdcbxdKZ1bhauAgo4bQ+mKCnVDNXaWmxQhRAnFIMRh5mzc8RarFYvoCyFXGrLcDl1KpP6ulWrOYys0FTI+VQo13cWQLRmp5Iy3jUUCq5xQMF7ixgYpwnbGfb7LTEHjDW4xuP72XwpZYoWrMpNjdaCFs7cjsXW6JlaXdXarlvrzNWwKmdWqzXGWHbDljjW9TetYwqBrltX4Z8LGgtJ69ztxbsucnR0RNu3HG+3hBRwBk62W1JOdW638fXzN1Krqlrdj0sqJK2V8pgSQwwYV1t+VSBpIYfAfpx4/qe9nBd8xmvQkgEHVkha8Kcf5MLCHcqb3/zmD9sO/LM/+7NPuv+d3/mdfOd3fudzsKrbmxe8oIrWX/91uP9++Iqv4FlpE15YWFi40/jVX63Xn/Zpy+/NZ4JbtLt64WNBTy8KioVmRbe5SNcdYqRBsBjjsdZhTI1riTGSUkJEsGaezWQWrJrPZk3rjOuNiuNpW2kpiZyr23CtzNbZT2sd1grGCsbU1663q2hUA0UgU4VrVaVz1ddYmqbDNg3WOsCiKpRcI3lyqc5miuK9p5RC2/kqQn2dOXXOoKqc7LYgiu88SsH56k5cSp5fq86g1uTXmiebSo0DQsD5auQkIqSUiCGSc43MaZoG72ZhDvX1VLHO0fc9TVtNmY6Pj3n8yhNcO7mOGuXiXRd40YtfjGs9WKHtO3zbElJgygnrGhBDYRatubZsO+dQrdXt6vJc5+5yrq7LKSemEFARVpsNf+Bz/hCs1+BbNBcQwbduPvUwG1w9pbC//DZdWFh4Kjf7WP32b8Ov/Mr5rWVhYWHhduJm0brw8bNUWu8EBHAQk+LEYMWyWt1N3F0ljDsKhlwUcRHKVAVaHrFYFIu1Hqd1zrUOedYq6alYFb15AtKgKqgKIoab+mjpug7Bz5mnpc6x5kjWQCmZQgQ5tQMyc9utoMUgVBFo5/lMNRlkdiXWUy/hmu9aTAZviGHCu4Y8R8EUag5s1kTMkaMLR2CVqyfXmaapti8XweAxWiha23yzVsfgml9rcMZirCC2Ou4mTRTNmFKNSLquI+cMRXFtg8/KuJsQC0cHh5AiGhPjuGcYJ4rAUAaCJrRYaMCvHC2WIoZ9HPHiadc9ISVSqtXVLILD4KxFCWjJc0ZuQlyqETbGsCNyohnaNXc//4Vc/Nw/CtIAjqwBJ3VadQqFtjE3nSeord61J7i2GN+cALywsLDw4INPvv8TPwGvec25LGVhYWHhtmIRrc8si2i9JbhpiPJ3QUEp
FiaNRFU60+Laezi67LHSc+2x3yANA613eBMoJKyHxkOMI2GaaFce74RUMilmUplALUZqldaZht3xFucstjEcHV1ETKaUQEwjKSopjojU2JyURqY0omS8MzS9o5QqKsWCFYOIJUfISXHWgy3s99dRSWRVsHM7hdaW4kTGGMP60oqUEskqx+MW37WQhbifMI1hmiZCGdmNljGMrA83IFIdj7OiEcI+k1LEaCGmTJgijW1ouxZjlZgGSq5uwAebHuMMIQZO9idMacK5hhwSfbuq7cKbNcNu4PjaNRgjonDh4IC273B9S3ARObRc3V9l5VfYI8vVx66xOxnpuwMOjy6wHyLvf+hh7KajXx/WyCAMmqD1PaWMhGGLtQFrJgYdicbD6iJ7e8gJni/+k38W+rvB9Yix+NWaatwsrFt743C7+Xg7uy+zX/NNmxcWFj6h+Z//Z/jar63xN//9v8O3fRv80i/B//l/gvfnvbqFhYWFW5OTE/hP/6neXkTrM8PSHnxHoBQixoHxFhWH0oE9wnX30HSXUbshZI8aj20aVJQhjOSccM7Wtt9ymtspdZZSq4SpFVfBux4RTylCDIUYCjmDEY/3Lc42WGOxYjBisNVqmEIiE8maUVMHXBWtIlIFKw1GPRSDsQZjakVTKWQySRORSCShUupsLLUSW4Q5ykVRI7TrlqbzNH0DBowRUozklAghEKeEKjjnsM7WttscKZoQq2AzWVIVzJbq+juvoWa2CtZWJe2cJZeCc45Syty6DFaBlNFSqkOxM4wkRgLFFa5sr7CPe2xjq3OwMzVvtXFsLhxgu5Y6MaykomQt8yVTNIFRbCNEIn6z4iQnUtfzqjd8Hvd86meCW4PxIOa0gPq0l/rP//RyQ6J+fKdQFhYW7iS+5mvgve+Ff/tv4eCgbvuxH6v3FxYWFhaeym4Hn/u58MEPwtER/L7fd94rujNYROsdggE8QoNBVGs+qvW0qwMOLtxFu7qASkehxbgVSI2LOZ1rhSpMyQZRh2ARagZp/RlgXI2hKbU6mpKSi0HxWNNijccaj3Me6zwitgrfBCUWSszInLuqKsSY5+gdg4ijqMHaFmsajPHzNiEXQ86QYhW5Oc0mTrMx0qlxlDGC8xYRpW0aVBXvG1Q5y6LNuWbQWmsxIhTNFKoDr3EGFSWRwCnSgNpMKBOxBFQK1hvE2prf2ni01JnTnDPWmLO1xJwoKN45rLWkGIkxYYzDOceq71kdHNB0LTEnTvY7hjBiG4cYKDlS8kTSkagDSfdEHQhzNd21aw4vPo+TfcF3Fzi4cJk//hV/AvrV3IK9sLCw8PEjAi95CdxzD/zGb9z44+snfuJcl7WwsLBwy/Kd31nn/++5B37qp6pwXfj4WUTrHYABLKb2eivzjGLdKu2K9ugyB5eej1tdZCoNSRvErRDjyYVaDU0ZMqiaKiwxN4lZ6oxqqbE3NUbFgnhQi6olFZBisFisWgx2bm8VclFSyuSk80yppWQlhEROikhtg6XUqqtIg5E6HwsOLYacqrtvLlV4ppJrK6upU5hKQUw1JRpjQKkGSdbXimrbdrRth7EOtP4szQLWOY9tHGqUqIFIJNsCdfdIJt3Y5gQ8tdppLUULYm2ttCKUUmpWaplzXhs/myVlUkoYMWdmU6fxNjFnxhQoFHzrMRa0JLIGMiNJBwIDkUAgEVSJeNQdUMyaMXq+6Eu/EnPxnhtF06VUurCw8Axz773w1/96vf0TP8GZu/vCwsLCQuU974G3va3e/gf/AD7rs853PXcSy0zrHYAATmubbM4JaxxGagsuNEh3gf7uFxB1z9VHdqQ00TYbmi6gYSDGiDEFcbU9WMSAqUZMWQulVEOmWDLWWpy1OOdr5ZJEygFNikjBGalVy1Iv5JpnqjJHxhhzVi3NqVAsaKniTdRWwV1V8qkdMlBdipkNoXIuSBGs2LmNuK7buGoQddrxWkRB6uNLKeSccdZCNsQYmELES0Pfd2RbyDkRNKBOSS6ixhJNmg2aal6sCHgjczwOZHIV9Lm6EJestcqq1enYGEuZhbuIoQiM+wHyiMa6u826w9gWrCNRv0jVhEihSEJtrHO+kohFiWqJ+0yKE9Ef8Krf9zl80qtfD7aFosu/6oWFhWeNz//8mt/6W78F1sLP/Ax83ued96oWFhYWbg1+6qcgRvicz4Ev+ZLzXs2dxVJpvYOQXDCnSTVU3ZfVoDTY5iL9hd9De3gP6g5Q1lh/iNiOmJQYC6XUiBuhYKWKUJECGikaKSWhGhHROndqazmvqFAyTGMgjIk01apqyaAZyFXMIQbUkGOuJkdqKVlIMUMRRDw5C7lYSjGU4kAdgp9bhh2qkEqNrNGzimKprbvG0rSOpu9QIxhrmUIgxsRuN7DbDnVNQMqFlGrsjfEW1zrKnCWrrpCk1jWzRNQWcFBMJpdUc1sVQppAIYRALoUxBlJKhJjBCNb7+l5aq8m1PVmZcmaKkagF23i6VY9tLWMOXB9OCDmAUdRGsBF1kewjyReyN0zGcRwy10fDped9El/8JV9NiaY6BtuaW7uwsLDwbLBawf/0P9XbqrUNbmFhYWGhcuoY/Af/4JLN+kyziNY7Ba0VUuNqtmiNfzEU8RRpQTra/m4uXv69dOt7CLkla4eYDTk7Uq5zsKrV3EhNRkwGiSgRJGCdVsdgDaQ8kfJE0YQRxTgIYyAMI2kK5JDRpJBlFq+KxdQImZQpBUQsqhDP7jtyNuRkKdmgahEcGItYh3GeMrf2llLzVM9mWrWAhaRVwIYYAZimUM2WQiSn2qILtbVXjMyNxVCMgDeIN6iFQGYqkSQFvCCNUKQwlUAqkUQVnkWUcQqolmr0lDNjDOAttvEUIKUyd2w7UlGc91jrASFrYUwT22nHPu0IOtVIH1vAKeozxWdyU0georeUtses7+Loeb+X/+GLvwq560WYi8+DLChCKuncDsOFhYU7nx/8Qfi5n6u3H3ig/mH2lrec75oWFhYWbgWWmJtnj0W03gkoEFN1/51nWkuajZXEkNQQskfMEe3qXtrVvQy5Y0wtxfao6UjFkhEyCpSzCiaSUckoAd8UjM2gEymPpDhQcsSiNNaTQySFRI5lFqyFOeemzrKKAww5n862VhGrSee2XkvMpT4NS8GgxmKMqxc5NYUSis4Zo7PwVBQRZTfuGGJgOw0UqVVZa+vzvWvJub6XdQ7rXI3nydXZGAPihGKVpImkqboUW2pV2ciZky8oMUVEhJgSRSDEOmsbc0CMwThLpsztxa6K9FJbi6cc2U8D17cnXNteZxsGpLEc3nWBdt3VFl8LxUJ2SnaQvCX4htD0tBfv4eWveT2/9/f9AaQ7REyLOs8UYp1TXlhYWHiWEIE3vhG+6ItubPtbfwum6fzWtLCwsHDeqN4QrZ/+6ee7ljuRZfrtjkDANfMMaJ3ptK7qziyC4FGBSMH7S1y85+U01nDt0fcQd49hHCCF69tH2WwsWSHmwGa94uLhIScn1/ngg4+yWvW4tsG7FtSQUkbnN2sah7OW4yeuoSVz4dIRq4MeCYpGxVjLfrtjHMfquGu
a6uUE5FLY7wdMMCQyzlu88zTOAJmQAiGOlKiUUmdFLZZcMiXpLDwLx9sTtIBrLC0NqWQuXLhAScp2tyeGjLeeNOey7oYBIw4xDmtqwA4GXONRo7PJU53nnVJECzRNw6pfMewmCnV+dX1wQBoDpex54srjvPQFL2AME2Ec6A8OEYWr164xjhFvGySXanolgrrqOKxGGcvEsI81ysdkfOuwjWGYImOC4lqG1NCsLvLCV3wGX/DVX/v/t3fmcXIUdf9/V3XPzO5mNxe5gUA45E6AhMQQ5JBAQEAuBREFJIAgCD8RlPggCHJ7PKAiKKAR5fYBVJAbwiWg4T4jxHAIhEDItbtzdFfV74+anpmdzB4598j3zavpnq7q7qrpzs585nuhGgb4xFgASpNOF63NSC4mQRDWLN//Pjz4oI/fAl+TcOedxSVOEIR1k48+goULQWvYcsvuHk3fQ0Rrn0AV41iTKqttXqKUQrkUQTGtrE71p9+g0WBhsYXWpQat8qTq+mNVRCodEOo0rYUsSz9YRBA6Ro0aTkvrMlIhXuA5g3IRNnYYm6JgNCk0qSAkMoa4UCDOK58kSmmcM2itSaVSRVfhgACFdQ5rDA7nsw0Hfp9xjgDnkzgVU1S64jQD5d1gff3YcvpKB+QKWdJhBoIkK3AKaw3G+QzG6bCOMEyjbICtV2gdkE5nfDKlKMaiUK6o/4unds5nBXbWS8GC8e63YRCiCQkIcGFIpiFDU9yfXCGPwwvslmwrMQqnNEEQYp1DFXNMGRRWOSyOWIPRxbqwNiYI/NzivIF0I3mXpzlnaRy0Ho1DNmTvQw5D9RtAlM2j0nWEdQHgiIvvqSAIwppmt918PcIjjoD/+z+feGTDDeHZZ2Ho0O4enSAIwtolsbJuthnU13fvWPoiIlr7DKrNqmpv8afvEFwG5RrRdQENQ0OshYJRFLKWhjQYs5hC5EilNaiIQmwJlaVfUXCGOgCKbr+xxcUWG0OMIqUzNKQytMYxhVweHRhSdQFhoIisI6UDgpTCKItCo5zCGS9KnU+ZW3IZts6gjPUxp8VSO/4/AyosqvIkb7BXmA5Hpr4OjSLKFSgUjI/TjRRaaerq69GEvtxOHJOPCmitMVissUQmAhRBrCFw2KKbrXPgjMIaP5bWOId2gc9e7ByFOE9sLCrQZBrqyedy6JQvg5NvyWKVRgUhQSqFjS0on03ZKS/OjbLEylvFrQarHbkoTyEXE2TqMFYT6X6EjY00rLc+Uw/8Mv1GjcbaEKMdmYzPFB3FoMKAUOvlHwRBEIQ1QCoFn/ucF60A770Hf/87HH10945LEARhbfPii34t8axrBhGtfQVNKWuwxysuVbSulkt3pkH5BEdBWtM4PIXVaT792JJrzaNtPQ6DjSM0ddTX9ye2rSxtzpJOaZ8AySqIHdoqAqdRVuOsI1CKunSKOApozbeQUwV0UE+Y1mCNF2quGDOLQimF1godWJQrlsPBEjsIrC1Wv3FYF+Oc9cmWAOd85K1zQdVsIQi8S691jiiKUQa08Zl7U2EdoUpjjaVQiMhms6AUYSrly9MEvj6tixwYQCtvxHbeamqN88dGWdJBhlSgMXGBuDWmkPeJpzQKFWiMs8Qm9qLaOZwDTUAqnSJv8t7CDF64KjDaYnzoL8YZTKBxOkOBkJg6Ci7DsJGbsdu+h7DBlmO9q3N9SDoMQKmiZdeSVlKoVRCEtcuuu7Z9/cQTIloFQVj3uOkmv95ll+4dR19FRGtfItEpyoJPY1TckWTjVfhcRgG4epQOCNIhA4ZpwpTjvbcWoTU4FRHbPM62gAsIVIZAh5ioUKyRCto6AhcWLaMa5bzgS4WKTDokl7cUCjFhQaGCFM75OqnGGJz1/v4q0CilCLRC6xC0o+B8dl9TLL8DxVqxRaFKEq3pvJ018eP14taRy2XJpDOEYYiq04SksTlLFBuwEem6OhSq5KqstCZI+QDgMPT/HOLIoLSPDXb4eq/W+JJAznp3a10M2opjS2shT5SLwEGoA5rqMrTkssRAuq4BqwLyhRilHHXpOvKtEc76GFqDLSaSAqOdt7QqBS4gk+5H3qbJ5mDQiE2Y8LkvsNl2k9FBHSrdgAGsKr6vkSEdpsFBlLdk6sqCXhAEYU0ydqyv3/rgg/71Y49173gEQRDWNs8+65d0Gr7+9e4eTd9ERGsfoWRkLcWxOry5UIFTaBTOKF83NShaNW0ai0OnFP0GbczI9Rex7NN5tDR/WrRYWoyJQEN9JiSfXYqNHcSW0GkCHRIWBbE1XoKFKU0mnSGVShEVCuSzOVAxOtRYY4jjCOccQdEVWLmg6Onr3WbB4JzxllPj4z3BeoulSl77RTuK+8oTN85ilfMZf2PlxWHRvVihaWlpwcSWKIoIwpBUJg1akS8UiAsFAqVxVqFssaxssSSOsfhas0BdKkMQptBoYl20CCvQSqMCDUE5KXcqncISkssbImP9WCjG5ypvgbXKYpT1VlaAMECpkOZ8REsE/YdszITP7c12E3cnCPqhwjqsCvy8nUMHCq1TaOfjcVMpSQouCMLaIwjggQdg0SJYbz349799QpLhw7t7ZIIgCGuHa67x60MPhSFDuncsfRX5dtsH8NZAvHspRTHkpVZpUVjvNFrc7ZzCoinYNLGqR6cGMGDkpgwdvglN/Uegwya0bkQHTUA9uRyEuoGQDNqFOBOiTIiLNSYPUTb2NVCdI50Kqa9LE+DI57Pkc1nAoLUXm94qajHWEMURURRRiArEcQRYrLM+eZKNsDYuugeboj2yct6utK8yKZNS3q3XOktsDJE1OOeTLrW0tLBs2TJac3mMdaiiO60xMVFUII4sRAoihYuLQj8GYl9r1sUOY7zozefzRMW0mToIUGGA0ppCHIGCIAx9zKoxPqmSiWlpafUW28Q6DFhXvG94q25sY6zWRCgamgaz0+f2YvzOe5JJDUCFTcSkyNmYiAirLCiLLrpaK1e0AksuJkEQ1jKDBnmrK8Cll3rxKnnhhO5m6VJ5DoU1S6EAt93mt6dP796x9GVEtPYFKsIXXZtQxor4xqQGii+VijFgDQRBgHUpIhuiU0OpG7YlwzaZQH2/DWmN6oldA0bVky1YwkwjOlUPOo1De0EWQxTH5PMFYlPAEhOkIJVJobQiKkQUCjHOaQIVEqrQZwl2iriY/Cgf5ckWsuSjQklrOWd9iVdrk1BWykrMW1+tinFYLDFGxxhtaM3lKMQxQRiSqasjXZdGhxrjIvJRFqsdToNTtlgaSBOGAel0mrq6esJUgMHXZ7XOZwx2VuH9dr1VOZeNaF2Wp6U5SzaXJzIGqx1o7+rbms/jdIgOQ+LYEMWxf78MLGlpJcYnXYq1JtKWKLCYwBEHhlhDrOv5pNmRGbABu+5zKDt9bm8aBgzDBXXkI0s2XwDny/5QfAeK4bdevEudVkEQuokTT/Tr//1f2GIL2H9/iOPuHZOw7nLvvTBggH8eBWFN8cAD8Omn3rtk9927ezR9FxGtfQSFv5lepiogwCvU0G9rhQqKJT0D0AEEgSJUihBNoJowhUHocGPSjZsxZI
MdaFpvc+JgAHnqoK6JVuMwqRDdkIH6gIKNaIlayLs8NmUwYURrtJTm3FIshkxdHakwQ5R3tCzJ07KkQD5rsbHGRIp8PqI1lyMb58jbAgUXE1sHaLROoQiwsSPKG6JCjIl9/GZkC+RNjpzJkXWtFFSWKMhigpgw7ZMuGWPJRwUKpkBYF1I/oA5dpyA06Ayk6wPCNMS2gDEFlHIo5YWwqrPoBiiQJxfnfexvkCGfdSxZlCPOaaJIY22IVinClE9q5VIWExhyJqY1smTzhlzekC84jFU4lUKl6ljUmqUFyzJiltg8ucCi+qWIUpashveXOuqGjWXKvsew/S4H0Dh4AyAF+PjftA6o0ykCQkJCUoTFe17xEAiCIHQDJ54IM2fCmDEQhj6T8C67wIwZlDKyC32Ljz+Gk06Cl19eu9ddsgR+/nP/zH38MeTzvnbwjTeW+/z1r359yy1rd2zCusXNN/v1YYf5cAlhzSAxrX0AVfpfeY8iqOpQXit8BRxX0RsyxCrEWoXWaTJNmpEbKjINGRZ/Opd86wcolUMTE7jYx8VmVNHmGRHHeVJBGmsjYmPQKiAIA1KpEFtwmMiSSqcJtS/PEluf2Mg6iK3DmogYR5gKfV1ZVNHaqrAWH5OLd/nFFQudUpGNWIHWIa6YNdg4S6FQAKtIhRm0VkQqxoUWlC+Bg3JYF4H1sa/GWmIXYwOHxuICh3IBxkI+G5HPWTRpooJP5KRCjSv+C7LKW1nRhry1KCyBjYuJr8BahXEQ49D1KZrjPC6toK6OWFla8nnChgyffppj0KhxbL/z/my6zS40Dlrfuw8XXZlDrQjxwrz6KSg9B5I4WBCEbuToo/1y++0+vuuZZ/yy2WZtXec++QQ+/VQxbFj3jVVYdS69FK6+Gl5/HWbNWvPXe+89GDwY9tzTJ74Bf+0NNigL1r328i6bSd3M55+HXA7q6tb8+IR1iyVL/N868DWrhTWHiNY+gurgVee7lE8kFIbFeNMQrftR17g+Q1IpcLAwcuQLnxCgCB1oZ9GBt1o6ZQmNQoUBJgKsT4bkFAQpTcoprI1QKgDliCNvUXVKE4YZsM670BpfVkajcLoY6JksyYAtvsSLA7Cl2FAoXtd6a6x1tihwITYxgVNYa1Ba+/jPoshz1mGdwRjrY1VdjHU+u7EzgU+2VCyRY4wl0Gmigo/Ptc4RONCh827HzoHTxLZYgUhZwAfEmqIojpxDZ9IYZ4gshOkG4jimNW8o5CJGb7wdO+/5ZTbdZg/6DxhWrElbzAatfbZlf52K+6gqfo0QBEHoIRxyCMyeDWed5TMLf//7XkyMHu1d6bbdVhHHQznvPG+RDUO44AIYN667Ry6sCH/7m18/9hh88AGMGAGPPJLmnnsUl1/uBebq4pFH/DNkTNv91Rmrhw3zNYSLaSeIIi9cJ09efWMRBIAbboDWVthqK/jsZ7t7NH0bEa1C2QJbrPWqCEBlUEqRrgtYb3BMJmjk4/lzsNGnmMInxCZGqwJhAGE/TegyuEKAicFYg1YOpRWpdIh1MaYQYWwenMZYh7W+nqlWoc9kHASAwcQWhUM5H8/qkxZREoQKXazZWkwmVWq3OKcIAo1S+IREWoNSRRutQgeaMAVYhXK+tqyjaGE1MXFsiZwBF+ICTVwwEDsyYYp0OkMh58i25lBKEwQ+kZVTAUp5y68vwaOwFGunKoe3rcbEyhArS4QjDBWxNURWk88b8iagJZtmy+3GscteX2TTbT5HQ9NwHClfFxcfe1xpXXUVWSWcczUsr2ue7rimIAi9i/HjvSAdP967j06cCHfcAffdBx9/7F1DTj213L++Hm69FRYuhGzWW8+EnsfFF8P778Oxx/qEW+AdoG67DQ4/HL76Va9Uhw+Hn/xk+eM/+gjOOANOP91bqRoavAt5RxQK8K1vtRWsJ53kfxz5/vf989Lc7C2xUBasCU8/7d3W47j8XP373/5HlKVL/fEbbbQSb4awzuIc/Pa3fvub3/RejMKaQ0SrUKp/6lQxm67TWJsC62uvZhrGkEqvRypopHXpuyz+NKC1tYByBQgjwsDglMPEBqe9hdXiCBQEYYrAGpSJcUoRBAFp7a2RxkIcR5jE21dZTJzEtKpiFmEv2hKNVtJqjmKiJi/arHVYa0jpsChGvblTuWLZG7xbsNLF6F/n/aMt1tdLdYbYGmLfDaUUcWSxsSMdKsJUCqXzFOICYZgCF6CcJbYOZbyrssWCMxCGOBUAFqcjDIbYFSgYU0zCFBKHmtiELMsa0PXsMHF3puy6FxtttT3pxqHendjGvias9j7GtiIgLNlOhGO1cBVBKQhCTyGVgrvuggMOgJdegs99bnlL2Y47wnPPwcMPw7x53mKRzcLbb69eS52w6vz3v/CDH/jtK6/06zD0YvDmm+HFF8ufP3/4A5x9tre2b7UVNDb6ezxyJPzpT976+eqrvu922/nEXe3xu9/BG2/4mMHk+fnmN71lPnETfu012Gab2seffjqceaZ3EZ4zxz+LX/iCF71PP+2txHvt5WNkb73V9584cRXeKKHP8+678OKL/vk/6qjuHk3fR0SrQFI8xjqDN2FqtNNo5xM6KV1HkG5iwNB66hoHkclk+HihZtnSmJZ8ltA66lLgXIzWoFI+BhNAqwAdpAkC4+2dOiTQAco6b301BS/8tLeQxsaCCnFOl1x/nXPFeNXiaJ13kXXWuxN7iywoa9HWYOMY5/DutA5fW9YqtAp9bVhlAY1VDqscBofBuzQn9WEVzidgSvl+cZT3yaXqU/hcUT4LsVG+r7U+i7G1DlJ1RU3s3YaNc0TWUsBScJBrbUGl+5OLFZnGoYzbcTd2/tw+DN/wM6iwEROFfiwVQtQLc0sQBGit21hauwMRxYIgrAijR8OTT8LXvw533un3bbON4+abP6ZQGMJ222nWW89bWHfdFRYs8H3++U8vaE8+GU47zSd1SrAW/vUvb8UN5dvMWuOuu5bfd+65fnn6aX9PEj7+GAYOXL5/JuPXiWAFOPVUn3m1sdFb5TfZBPr1K7f/6U9+fdFF3kI6ZMjyruRbb+2trm++WY4zTKe9lRa82G1p8e7qf/mL35f0A58F9oEH/PaLL/q59uvnXZ6t9SJ73Dh53gTP00/79fbb+5JfwppF8owKRcrWQq+HFEprn27YheAyoAaSaRrN4PW3YeQGY2kaOAarh5CLm8jGdeSMwwYWndEQlivEEoToVB2OgCh2RLHBWINSBh1aUmlDmDYEgSE2sY9JLcanVgrXcpYhVRSu3vJqLRjj/GJjClGBKM5jrMEYQyGOKMQFYhtjnMF426oXqhRL4BQXL2J9m05pUpkQpw0Fk8VqQ6YhJEgrdBpIO1zgsMoS4+vB5l1ErCNiHRPpmIKy5LHkgazTZG1AgQzL8tA0ZAOm7L4vn9/nEEZuvBWkBmBsmjgGY7wl1VqfUCqfz5fqvVYule9R9fvV2bJKT4sUvRMEYQVpbIT/+z949FEvXB980DFkiGX77b01NikV8d//lo+ZPdu73/35zz7e9e234Z13fNuFF3qL7DnnrN15rGssW
wYvvOC3n38err3Wb595phd+Dz/sLa977OH3G6PYaacC55/ffrrofH75ffPmwTe+4cvUjB3rXYET3n3X/+ihFBx5pB/DJZfUPvcll/jnLOHQQ/2PH3fcAQce6Pf97W/ebb2S448v1xkGb9XdbDNvIf73v7378oQJPumUIEBZtEos69pBRKtQQinlK6ZohdJgnHfhhWJl1EKAiVK4YDBNQz7D8I12YsT64wkzY1ja0kA2CjA6QKVSEKSIFUQ4rA5QYQodpkArYmeIbQGUIUw5gpTBqTyxzWFtjLFF0erKwjUZQ1m0JjGtiZuwt2iqQHkrqHLF8j4+qZNVXpQaLBGG2HnHYKuKSaO0wgXKVwoKgNARExO7Ajq0pBsCgowlVnl0nUNlHKQdhA4TWGJliJyh4AoUyJFXOXKqQM7FZC20Gk2rSZGNM9jUQDbYZBy77X0wn91tH+r7DyNSdURGl+qwFgoRcRwTRRGFQqEkWK21xHHcRni2ty0IgtDT0NpbUg88kOWyBu+1l18HgXfZBO/2mQim2bO9VXW77byw/elP/f7f/MZnhhXWDKecAjvs4EXbjjuWXXGPPhq++EUvVrWGr3ylfMzXv97KjBnw+997F9ybb/aW2KFDa1/jyCP9Dxd//rN3+QUvMpMav4lQ3HVXWH/9ro37H//wgvVnP4OddoKDDvLCFLyozefLFtNUyv8o8swz/jk7++zyeRYv9sdedpl/fd55Xbu+0PcR0bp2EQeHPk6lgKl26awWN845XDFFry+zonw1maRsi1K4CDApdGY9Bgyuoz49GPQwFuqBtCx5lYhWNBobRDitiiI0whGjUhbtLM7EOBvjnLd1xiYmiuPih1OIscVPKe2tvygf34o1OGNRgcYBkSmKWqVRyhEbgwls8bhiIiStUKH2P8+EChN7t2KUwymFUxajHCYAp5SP6cULX2u8u2+gQKc1RIaCyRHqFLbY1ypfsidWlkg7X2c2iIiJsUZjnKZgAvI2hUo1UdevP6NGb8GknaeyyRbjsC5Dc94R2jxBqClE/v0KAkVggtI90lqXLNCVca613HSVUsvFvFavK+9/9TlKGYqr9nX0bFX364pwFhdjQRAqOeooLzQOPtjHPN5+uxcQSZmShQvLfffe2yfPAZ+J+LDDfM3OzTZb++PuyzhXtkj+7Gfl/Tvu6F1xKznkEB+72tDg2H//HFr355hj4Jhjyn2+/vXa9+ikk3xypEsv9VZV8Bbe7bf3sc3/+Y/fV3muzpg82YvgSqZMafv6hz/0SaHGji3/iDJ+PGy+uXdR33xzuPxyX1InIRHeb73lhe9663V9TELfwFrvLSKide0iorUP05FwqLTMgbdiGutFoq824y2aSjmfnAiHCh3OhBibxuYDtA4JwnqGjxjBkPXG8OYbjqjwHq2FVrB5lFYolcOSx9oc1uSKLsEKZxyFQkShYDDGgfZZhGNrMLaAcQaVxKWiiJ2D2GAiQyqT9vVYTYQ1jlQ6xBFgowKt+SzKgVIBJnYotBeioSbGYJx3f3bWz9dZ/D6Kgj1QgCN2BQh9gqfWwjKfpViBzjhyURaHBgKs0xgUBkusFUZrL1h1gLGa1rwlHwWk6wczaoOtGL7+ZoyfsBthXX8+XexQgUWHBm2yKB0Va9T6P4hx7BMxJUvl6+pswrW2K49NyuVUJ3Wq7JO8rrbW1jp3e326K5OxIAi9nwED4Kab/HZzs3cFrXQVriQRERMmeGH7t795wTtnjoiI1cl//uPr6UI5G++ZZ8L//M/ymVIHD/Yxqko5bDuewZts4kXfxx+33T92LPTv70VrJZUxrxdc4K27q8LAgV6IvvkmbLyxF9np9PL9+vf3FnzwicN23bUcFzt3rre+TpwIhYLi6ae11Bpex7jhhnLipSFD/HMtrHnEPbgP0pnraK3Xzjo0AdppX7oFg6OAI48lhyGPcd51t2AC8iZNIWokHw8knx9ELj+UwevtQEO/rYjscJpzDWTjDEan0Zk0QYPGpSKMzuPCCKNjcnFEczZPS9ZSiAKMC4iMIbYxkS0QmYiCKfgl9ovR1o8DW3T5ddhiwiMXQMEW3XRtTMFG5G1EzhbImwK5uEDO5MnGebImTy4ukDV5sqZAtridswUKLqZgY/K2QMHmydsCeZcn73LkKXh3YG2JvBwnZy05p8hbRc4qluYjcjYg6wKaoxCXGsyIDbdhs60/y2ZbTSZbqGdJs2Jpi2NZa8zS1hzNLc00Z5fQnF1Ka7aFXC7XZsnn86WlUCgQx3FpMcaU1tXblUtSw7az2NiuxsOuqXhZQRCExkbYcsuO++y4I9x/v3ctBW+JTbLa9jY++cQnAOrOP5+ffuqtR4k4g7IlqZIDD/Q/MNRi+HD/Jb49lCrXSt1hB7/ebDNoaoJtty1bb3XFt9OvftWL51pCeWW46io47jg/t1qCtZpJk7w7cSJSCgW44gpYtAhaWhR3351Z9UEJvYr77itv//KXUupmbSGitZewsoKi1rGJeEmSHSWLV4EKrMMZgzMFYpPHWL8uxDnycUQ+jsnnIZtXZHMpWnJ1tGQb6de4NU1NY6lr2AKd2pDIDiQbp8nGkDOWWHs33Fg5nNYE6TrSdf1JZQagdT+sS2FIYkmLi42IigK0YCOMM0Q2JrIxFoPVjtj5kjUu8PGyUfHYqHhMwUTkTUTeFrCBgpTGhQEupbEphUuWUBErR87G5J0h7ww5a8i7mLz1S8HFxFoR4cg7yBloNYrWWJE1ITmTIhvXsXCZo7mQYfDIzRg3YQ+222E3+g/eiMXLHJ8uNSxeErFkWZ4lS7MsXdrMkmVLWbp0McuWLaKlZRmtrS20trbS2tpKNpstLdUitlAoUCgUiKKIKIraiNmOlmoRmzwD1c9F8rzU2u4JiyAIfZfp02vvT6V8Xc/rrvMZO//0J3jsMd92zTXeirY2WLbMJyIyBh55xGfNrf6zlM97K3Cle2ktTj3Vuzz//Odrbrwd8eqrXjQefHA50RLUFq2VyYpWhosu8u7A994L3/teuWyOUj5+FuBHP/JCdtw4Hxc7ZsyqXbOSPff0z8nw4V0/Zv/9ffme7bcvjy/hzjvrV9/ghF5B8vfmgQfaxnILaxbl1uFvfi0tLTQ2NgLQ3NxMv8rc6j2Mzm5TrfZqF+DK/dWiFhTaBThncc5gXIyzvsaoc94yZ4wjjh3GgDMBihSRUeSyMXGhmbowjzELybZ+SEvrf8ll3yWO3gf3MUotIgxzKJfHRBE2BkUK5TJYExLHlihuoRAvBh15f2WF/59L3Fe9iyuuum5p4ppqiHP54rZP1kSyRuHQBDpEoXFOEZdK5vgFAyb2Cw6ctT721hrvHmzBonEqhbGKQhQQm4CCCbEuwLgUVmla8jFhppGhIzZizMbbMmjIaJxrpBCncaqOdKoJpQNUqAkCRxBAGFiCwKG1Ip2qI9BptPblbZIlKXejtSYMw5quv5Xuvh21J9vQvotwdXuy3V6farTu+Dex1eFGvLpckXvT3wKh57B06VIGDBjAkiVL6N+//2o5p7WWBQsWMGzYsE7/DfV2OptrFJUt
YcOH+9jDVMrHGhqzvBvwF74A99zj3VeTpDnvvecT+zQ2wo03rlqpktZWLyxfe827JX/mM15wHXUUXH+977Pfft5aGYbeVXnPPeH992HgQMdjj33MNtsMqTnXyj9l3fGtbPLkskA95BBvWbTWi8ZXXin323RTH8vZEavyDDsH8+f7EjOLF/vSOA0NKzaXNcmRR/rnqBKlHNdf77jkEs2FF5YzFPc15G+T5513vGt5GPpntLd/Xeju+7oin6MS09pL6MpvC7XEaUKbLLw1rLDKgbOJJdaLVmsMsYux1hRjSGPiyBYTIPkHuxBDLlsgyudYFOWwJo+JA0zchDXDsU5jTR3YfiiXJ1A5nG3BqQJKOYoRnDhbwOoCpKxPoOSqEvq4otNyHGOMl6k68EmTEvEa6AAXBKAc1uEFuKW47c8R25gk67BxXrQaY/3crcKZEGcDsGBdsbSOVf48FpwLsA5iq8lFiigKiF2IIwMqgwvqaBo0hPrGQfQfMJy8aeSDj1qITZ5UXRP9mtJE+ZaicAwIAkUqUISBQwcKrRUmBUFg2wjUIAhKolUpRVQMLkrEbKWgrSVgq2Naw+I3t2pBWkucdtSeCNfqpE61kjmtbiR2VhD6NqmUt1BedZWPZdxjD9h339p1P8GL03vugZ/8xItJrX1W4SQD7V//6jPPXnGFdzWdPt1bONvjtde86J02zb++805fdgW8heWJJ/x2IlgB7r7bWwg32KCtuFu8WDF27DDGjXP87W+w4Ybltur4z9mzvUXv/vv9uT73uU7eqFWkpcWXhEn461+9KE+yBNfV+ddPPFG2NK4plPJJuKBn1r3caqvy9pQpUFfneOghxde/7j+PTj2174pWwXP//X694469X7D2NkS09hFqWVWrt40xy/UtiVYLxBZXdP+MbTFW0lhfHsb6eqdxHPvYUudwzlKII++ymi+QUiG51izZliwmsoSqAcUQ4pwi3+pIp7L0a2ikod8g+jXEBGGOKF5KnFtEFC0lIgvaYHE+kUMxebD1g8U6RSHKU4iLlWlSKQIUBROj0aRCi9bFREL4qFznHNb5pBBeIgeAwqpiSR98bVarfCImax1xwaKcwhZjfV3REuts8X20ishCvqCJTIB1KQjqCMMGHE2k0+sTpgeTL9TT8klEmFLUN9XhrKF10QIa6voRqJBAa0KtMUFAGGgCpVFaEaccYWjQgReklaI1ScBUKUYr2xJxWqtftXU1Wdfars5OXMsaWyv5UmfWV0EQhBVhyy29yARvae3IUrrffl6Uvv++t4pWk8/72MiDD/ZC9sILvTuq1nDbbd5Ce9NN3uronBfI774Ld93lz33zzW3Pl822fb3ZZl6o5nJlwZpK+aQthx/ucE7x4ouKCy4oJ/kBb1ms5Fvf8vGlc+f6499+G0aN6tLbtVLMnu2F86BBsGSJf28SwQq+Tm4m40Xrl7605sbRG9hrL+8avMcecMst8Pbbjp12KlZbwD8vhULXYmWF3seFF5bLIe26a/eOZV1ERGsfoiPBmsQjVraVLK6umEU3smAsxnqrqomdT4xUtFTmCjFRVCCyOWJXwLqIQpwlm2sll8uTCuvIFSKyuRxx3qGjEBPVkW9pJNecRxFQX5elsSlP00BHQ5MhSEfEKocNWjA2XxSXgRfPDpzxAtRZL2Lzkf9A0EBaRWgUhdihsb68jVJgveU0tg5nfHZga8FZhdZevFrnBao1eGtrMatwHEEhZ8FqrLO4Ypkda/0XDuslNFEcEMUBsQ2x1BHoBgL6gR7ApwsVA4IG6uoGkwoskS3Q0pojSEeoUEE+9hZUlSKtUsQ6RahDUjqFVoootOiwgA4UYRi2WSrdhRPBWhljmgjTRDwmgtVa28b6aozpUJAmtCdOK622lawNC6sgCOsmSemb9ghDLyTuvhsOOMCXMFEKRo/2rsMPPOD7JZbXBQu8hXHiRF+D9J13vJX29tu9uE1Kr5x7rk8QdO+9/vWRR3ohWs3vfudreM6eDVOn+njXn/8cvvxlyOcd11xT4LHHMsyc6UutrL++/2xNyrmAz1r7r3+VX0cRPPooHHGE7+tc2yRFHZHPw8yZXqRXZ7e9/XYfT/qtb5WzAu+5p7dsJxl7t90WvvMdX6YGvAUxs47nHJo0ySdgamz0z9bAgXDyyS386lf9cEUPsXPP9Umm7r/fPzMPPQRbbNG94xZWnUWLfAZr8PWJ/9//69bhrJOIaO1hrMgX/lrCtFZ7IlirrbElS6t1YB2xKYBRxNYSJ1bW2BIl7sFRTBT5jL6xK2BtnkKUo5DPUSjkWbxwCbgQbIC1mnxkyTdDvjVNHDWRz0XM/2QZsVlMmFnGwMEx6w1R9Os/GBXUY2wrxkRFIW2xGB9XStHiiSMX58jHvhSrDdNoNPlChCbA6gxa25J11RhbEqW26CZsYm8pdQ7fZn1slLEKZzVxIU2cT4EJcc5inSlaWgGjsAQEYQNRHBKbAOfSoPrhVD8C1USgB9DQfz3yeViytJmGxnpUmKJQyBEqS0O6njgu+CzNyuC0wWiLLS5aabRVKKMIQr1ckqTEmppKpdqNY652DYblLaYlC3uFQG3P1bcW1f2qj61+PleEFbHQdnYNsfYKwrrFlCnL1+IE7zZsrXdzfeml8v6//c2X15k717/++9+9tTFJtALe6piUtBg7Fo49dnnRmkr5c993n79OKuVFYyLyvvpVmDp1EYcfPpzHHlN8+cve9fj998vn2HNPOP10OPxwb8UZPtwnIXrsMS9mjznGZ0iePt1baqvFq3NerH/6KTz8sLeQnnaatwy+/365/623+msAfPe7XoiBrzW5YEHbeVdaDNd1wZrQ1NT29YwZzVx4YQNHHaW480645JK27b/5Tfcl2BJWH3/8o/eiGDvWhwrI14u1j4jWHkzlF/JqcVFLgCbrykyv1VmDk+1aWWJNFHtxZ21FKRWLKfZpzS4rthW8m26U80I2BmyKutAnVIqtIYoj8rEvEROFEQaFVQ24YCgmX09Ldikfv93MG/OyKBWjg5B+/TIMG6bQYY5AW3SoCALQoSPQFqcsS1qXYHFgFUsLSexpP5/5eKklnVGgIkAXkzD5erO4EOsUrS154tgWY1XBOV2cMzgbki9kyOcyQArwNWKV0igVoFQABCiTRqkUKpMhDOpIperQQYYwCHEqRbo+jdKgAkMUtRK4kEwqJHAa2xqXXXe18gmfMMT4GFylFKkwRUqHbe5V5b2tdN2tdA02xpQssKlUCijHvGqt2wjabDbboftwtXtx9dKVRE0d1YJtj45Ec7UFWCy6giB0lSDwyznnwGGHwT77eIH629961+CEfB7uuAMef3z5c+y6K/zqVz4Ji1JeJIaht9xuvz3U15evBbVF3uWXO6ZMUTWz8m66qbcIL1zoxeKdd3rRevXVPkNyc7Pvd+21Pib2nHP86yiCX//aj+Xuu/2+e+7xll7w7sdBALvt5kX1jBltr/vMM3792c96q/PUqfDTn4qLa1dRysc27rKLv2fV3HKLfz/7eO6iPk1raznL9QkniGDtLkS09jJqWbI6SrD
UmQW2cjHOYouLcbGPa3UGU7S0WhuXy+M4BTYAa1E2hbIWrEE5wBmfZEmDChwEDi8i0z5A1YZg6sAN9McBKEtr6zLmzf0IpRU6sCgdo7VFawPaobUjTK1XHK+3jDoblLeJicxSfOphXZxjMduwC8BptK4DF+CcT+KUtPtswxpDBhdm8P80NKigtHbOC1et02idItApgiBFkMoUt0O0Vr67tihVFGxYlNMop8AqlC6KYae9nvZZorAYFMoneyqK0+ofHaAsCGuJyYRK999qEpFaLQ7be115XEdCsZbFtT1Lbq1ztWe9rT6+1tgEQRC6wqGHeu+aJUu8ZfStt3yN1IEDvZXyj3/0yXSWLfP9//53H+OqVNvapDvsAM895+Pbfv5zb33tCuPG+cRN06d7t+H33y+7HSfW3EQs7rJL+bjmZth9d19e48QT4fzz/Xq99byorXZVvPlm7+5cyaOPwkEHeXfkIUN8gqsvf7l87fHjvQt2LidfyleGQw/1MY/77OOfo2zWJ+T64AN/vwcN8m6lu+/e3SMVVgTnfJK3f/8bhg6Fr32tu0e07iKitZfQnitwR0K1eqm0tFZbYxPramLZq16SPpXnqEW15S5JJJTsr0wYlEqlSuJKKQWmkUJ2ENoCLgZlgBirIlAGq+KiRbKY6dfp4jooitYC6GbQ+Qorq+9TzDeMIkCpsJiQSRetpwqNF5A6sKRSSSrHsoVV4UWuViFhWOfL0aiQIEgRhmk/L50qWlB9IqnK2NOu3NNKQVZ5n6otnMl96Cz2tNINuPK81QK3ox82OtvXEdXCtCPR25m7ca24WkEQhJVlwAB44QVvZc1mvfU1nfYuu7Nm+T6pFOy8c1uxmnD99b5EzLHH+hjGFeFLX/JlZbT2FtFEtG60Udt+Q4Z4V8SXXvJi97bb/L5rr/Vxs2PG+C/U1QmhwLsAJ1x4oT/2hRfK1/rOd7zIuv56L1APPbQcMyx/YleOjTf2P4AkH/mNjf59/d3vfGwx+Hv3zjs9MzOyUJs77vCeDkHg/13V+nsgrB1EtPYS2rOw1hIl1W7BlUIHWM4tuPJ1Zfxksk5Eay0qE/IkfYIgaCOQEtfV5NzpdJp0Ok0cx2XrIRpMCpsJi5bPGOsirItwLo8jwjkLyhaFalKOpkK4uohMgwFVKNVyLbsHFy2pxidcSoSoVgFaB94FWIMNsljlsxgrpVFolNIkrsIQEIZpFAFaJ1l9U16cqhClHc6Z0vuSCPTKzL6Vwr4999z2hGS1mAVKiZaqXXOrrZKV96T6WpVux8kzUi0Oq/e1ZwldFWFZS+hWzmFVzy8IgpDQr5+PE63kvvvguut8EqZdd23/C+o22/hlZUn+3E6d2vac1Vx/vS+zM3162d34K1/xorU6Q/LQod4F+MUX4c03/b4xY+AHP4CTT/aieMkSb+07/XQvTpMkS8Lqofo36osv9u97Nut/OJg717uY//CHvj2K4PLL/bOWxBYL3c/rr8ONN/bjxBPLXgwzZoiVvLsR0dqLaM8duDOrantt1eK1s6VacAXFwJ1KoVFZDzSpMZpYcGuJ4XKCKAU2xMYKLDgXY6zGWo1xAAHOWcIwifVMYlKDootw8h7Z5d4vv413aSaJ1fRiVCdrHYB2GJXFqdailVcVRasqWmTx44SSaK20pnqrrPKW3CohWilQk3jRSmt0dd/K1+0J264+K9XCsvLZqcwsXMuqW3ntynO0Z9ntCl21wHZkaa3lfiwIV155JT/5yU+YP38+48aN45e//CUTJ06s2feaa67h+uuv55VXXgFg/PjxXHTRRe32F9YN0mk46aS1d71UCp5/HubNg+22W7593Di/VHLYYXDGGW33HXccXHON3/7Tn8pidIMN/HrAAG9B/ve/vaVX4ivXDsOGlWOPx43zWaB/9jPvjv7++/7+X3CBv09vv12Ohxa6j/feg89/XrFgQRO/+IWjudn/8FAdCy6sfUS09hKqLaqV+7oqXDuyrFaL0lpJdqpFVKV4rSynE4YhxhhSqVSb8yeW1Wrrb3IsBMV4U3A2xrgIZ31pHYfBOVM8FnDa13G1GuuKQheNiQ0OvGXVZzlK3kAf6aoCUIok3rScbEmBcqDqUToFqkLA0Vacl9+rsvisfJ/CikKCtURrtTitJVpruVi3J2Q7Errt3c9EoFYK1mphWkm1oK3s19HrzgRqLbfgWudqb1sQEm655RZOP/10rr76aiZNmsTll1/OtGnTmDNnDv6QCVMAACrNSURBVMOq630As2bN4ogjjmDnnXemrq6OSy+9lL333ptXX32V9ddfvxtmIKyrbL+9X7rKhhv6GLtnnvGxq4884q2vCUce6WNe33zTuyGv7HWE1cuXv+xrDj/9tI+bruS///Wu4vvv3z1jE8occwwsWOC/YzQ3K8LQxyg3NHTvuARQbkUD1foQLS0tNDY2AtDc3Ey/fv26eUS13YCB5UrWVIvYjuJVK62cyeta2YMrhWQcx6UMwslxcbG4XbULcXJ8FEUdiuYoikrnqJyX74evFWuczw7sDBYDLvK1UZ2BYo1Uh09oZPG7jNWooj5VOFCWRK16Aetqr/EC2LmyuNWqGPOqEvFXvGKVSGpPFAJtrKG1+nRmaU2OS9oql0phmVhtq5da1tlqUVt5jo6swe3NtT0xXHnuSkt8rR9BKsdQa7vWWKu3OxOvXRW3PfFvgbBiTJo0iZ122olf/epXgP/bsuGGG/Ltb3+bs846q9PjjTEMGjSIX/3qVxx11FFduubSpUsZMGAAS5YsoX///qs0/gRrLQsWLGDYsGHtxsP3FWSua45Fi3z24COOWPulauS+tk9zs//B4fbbfcKrSvbf35dgeustL27POKMc53z//fCPf3hrX3eVHloX7uu8eT4pmdaO885bxg03NPGDH6g+7Ubf3fd1RT5HxdLaS6i2sra3v71+7R1TKX4T4ZK4jVa7jla2JVS6klYnaerIxTihLJy9y29Icm6Hwydi8mtX6g8UXYNVsVRNknTJFt16vWgtvwe+xiskQioR9aZizL4MDi6FcqmSW7CfIyhdnq8XU23dU/06ye4bLNdWLea6Klpr9al872sJ0o6srZX3q/J5qLWvkmrxV3lMLetpNR1ZSqstre0JTbGwCu1RKBR49tlnmVHhv6W1ZurUqTz11FNdOkdraytRFDF48OA1NUxBWGsMGrR8vK7Q/TQ2lmv83ncffPvb3pX7nHN8puq33/Y1dJ97zscmP/qoL8m3775+ncstXwdWWH3ccotf77EHHHdcKz/4QaOvCiH0CES09gDaEwsrYgRfkb6VwqbaElpteUulUqX2fD5fulalNTXZl8vl2rX+OudIp9M1LcZ+G6xVRBH4UjSm6BLs8ALUVM1XldyE/Xm8aDQ2ouwTDM4HyFIWrpVLUaiSXAM0KRQpfOxqWYiCK4azViYjSkRqcj0vVsMw3aEFMl2sZ1Dt4pv0SaVSHQrP5L60Z/VMXLTbu37yw0Oyr/payXGVx1ceW9neFdoTyJ39orc6nEBqiWkRvn2PTz75BGMMw4cPb7N/+PDhvPHGG1
06x/e//31GjRrF1MrMOFXk8/nS30HwvxBD+ce31UGlp0xfR+baN5G5do299oLkz9PDDytmzVJst52judl/Rj3+OFx/vWWzzcBa/3n50586TjjBsfHG/ribb/ZC9/vfX/NZn/v6fY0iuOEG/93vy1/u23OtpLvv64pcV0RrD6KjL+nV1qzq/Z1Zu2q5ZVa3V1pSKy2sCZVZgSstp8nY0+l0h6K18ovd8tZYL0SDMNmvgUQwmzb9/VqVjkuEK4Ax5UfaucRVODmubHktiVbanlcToAipKVqhdD7/niXna5tVNwzLorTyva8UidX7K/d1FpvaXmxxLaFZS9RWj629fSsi7lZVCIqQFLqTSy65hJtvvplZs2ZRl9T9qMHFF1/Meeedt9z+jz/+mFy1r99KYq1lyZIlXfphp7cjc+2byFxXnMMPr2PWrIElwTpuXMSLL6b48Y8t++6bA3z4ijGKq69u5vTTW2hthWOPHU4+r9hxx0/Yfvu4gyusOn3tvj76aJr7788wY0YzYej4+tcH8corGerqHFOmLGDx4sV9Zq4d0d33dVlSFLsLiGjtZXQkXjsiEaTJsdWCMukDZWtYknynlptvraXyGtVL9TWT65Rfexddi/EJkagUyGVhbGJbcSwVbQpcgLWpNiK2Uri2dRkuvvYZnEgsrUppX7MVVerT9o1OztXW4trW/TdVuie1hGSltbKWpbVWeZxqgVodu1otimsJ41p9O9pX3Z68bo9aAri914KwOhkyZAhBEPDRRx+12f/RRx8xYsSIDo/96U9/yiWXXMKDDz7I2LFjO+w7Y8YMTj/99NLrpUuXsuGGGzJ06NDVGtOqlGLo0KHrxJclmWvfQ+a64hx9NJx/vuPDDxVnnumYMSNgww0dc+eG/OpXXrBOmeJ48knFc881MmxYP+67D/J5/9k6Z85g9t57tUypXfraff3KV/wcwrCBbbd1PPGEprHRceONji23HMrHH9Nn5toR3X1fO/qhuBoRrb2ISsFaKYCqRUUtM39l/0rLbOVSLidTtrZWuvNWXr+W6K20otZyH67uXy1afcZeh1MWbDEhk3W+Uo1VOAVxIS4lU7LOoZyu6KdxxmcRVs7bUZXDH++8VTVxBlaUra8+y3AiThVKdfSPttLSmrznbYWt1mG7ArD6nqyINbVyqU5yVOv4yvZqYVwtctsTpx3trz5PrTlW7xPxKqwJ0uk048eP56GHHuKggw4C/N+jhx56iFNOOaXd4y677DIuvPBC7rvvPiZMmNDpdTKZDJkaWVCq481XleTfbF//sgQy176KzHXFqK/37sAffQQ77+w9vb761XIZo/p6uOgixW67wVNPKd58U3H33eXjn3lGc/DB8L3v+Xq/J50EQ4a0vca8eb4Ez6rkGewr9/Xtt8vbv/1t4lkHl12mOOAAny+lr8y1K3TnXFfkmiJaeyHV4rXaygltLauVQhRqJ0iqJWSrRWst9+DKa1bWXa22vlZeN6H6eJ+l16/RDutA6WKWYKVwKILAlMSoF6vKv9bWvw7yOFUhVkuiF5xSpf1JtmF/6QrrtdKoNpbWaip/DNDLibjK7Y5EYuW+rlpBq0Vm0qeW6Oys9E1n1++KaK11rlrH1HpfBGF1c/rpp3P00UczYcIEJk6cyOWXX05LSwvf+MY3ADjqqKNYf/31ufjiiwG49NJLOeecc7jxxhvZeOONmT9/PgCNjY2lTNKCIAhri0039UvCt74F113nkzfdeivssgsMHgyffgpbbtn22Icfhi9+0SdvAvjd7+Duu30d2M039+177+1LIP3619DaCqNHr7259TQefHD5fSNHQvHjQuihiGjtJVR+0a+OJU3EZkdioPr46pjTyj5JWyKIKkVtV4+v1Z60VfapbtNFq6XPCFzhGlxMQmB1taVWFS2vxltoUT7bcI1rFt+J4j4q1pWJmyi6J1fgyr/CtaW2aC1dqR3B15HI7OgcXRW2nbVV/+jRniW2sr3WdvW+jt6HjtoFYXVw+OGH8/HHH3POOecwf/58tt9+e+69995ScqZ33323zS+6V111FYVCgS996UttznPuuefyox/9aG0OXRAEYTm23x7++U8YMQKS0tE77AAPPbR8348+8gt4ofr2297iGgTegnvwwWAM3HYbvPaab3/5ZRgzZi1NpoeRiNZdd4WWFp+Z+Yc/hBXwVBW6ARGtvZBK0VG5r7NjEmq56Faet9LluNLFt9pSWy1KE5fVjqyx1WNoE3uKAWdQlfGqpezARXfYYqxr+RzJ+Sze9hqQiNBKQZpYVMv6VCehrOXarcrhVIxT5RI7xXev7bZTpW3/vqrl3t/OLJXVbZXtle9VR6JzZS2plXQkeKvX7bW1t92epbVWmyCsDk455ZR23YFnzZrV5vXblf5hgiAIPZDx49u+/spXvGj9whe81XWTTbwQff113/7EEzBgAEye7GvCGgM779z2HK++6tdXXAGXX77Gp9DjsLYs/C+6CKZM6d7xCF1HRGsvo5ao6czKWtm3kiRutfIclYK0vfqs7bkTr6horeyblLVxSckaH9ZadAPWKKuwCkKVKroHKyzW98OiXFBcp0ruv7YihhXnSrGuSZUb50DhS+cUo3JxOsapmLZuwGWRWhasle+prthevixMtSA0xiy3v3JJfgzoSFBWtif3sr32rojWWs9JR+K0q8K1K22CIAiCIHTOscd6C+z220Oxsh3jxsEf/+gF2FZb+X2vvgqvvAL77df+ua64Ap5/Hq68Erbddk2PfO3z73/DCy/AFlt48X766XDuuV7Uf/IJNDXBxIndPUphRRDR2kOoFnWdUS02allIK8/bnotupbtcddKk6nN3dM7K46vP0VFMa9u1xblifVEoJloCi0IV3YDLCZY0VtmimDXLJV5aLhFTMabVGdcmIVMbSysapdMolaK2aIVEoHZEZy6yYRh22CfJDFy9f2XddCvvcWduv5XHdCZaq4PnuyJga1mCa7VVj7sWnYlfEceCIAhCX0JrqM4Zd8ghfqlk9Gi/fPOb8JvfwNe+BoWCj42t5LHH4OKL4YYb1uy41zatrV6QLlniXw8dCh9/DEcdVY5b3X1370ot9B5EtPZgqkVprf0rSrVVttI62J6LarJdq8ZqrfNXn6OWaK2OkfVofH1WbwUNimVogorzW2vRRREZoHEaAgJwbrmoU1U5B13cDlRZqBYvVLldPkmtHxG69r53dn+6KsY6E3tdFXy1+lUmg+roGu2NqSMLbVeO78o8a72uRoSrIAiCINTm17+GM8/0bsSXX14WrUcc4V2J33sP/v53WLAATjsN4tgfM3Rotw57lXnxxbJgBS9YAf77X/jxj/321Klrf1zCqiGitQ9RKRRruQ135kZcXae1vWNqCdpa7e1ZYttr9691h6K4vbF0NI7K7VoW6ur1ilq9OxtjrfYVEVvtCb2u9GlvTJ0d35X2WuPtyIK8ooJUEARBEISVR+tyRuJx48r7f/xj2HhjGD4cFi70SZs++cS3PfcczJ7t3WjvvdcflySC6i08+6xf77uvt6b+9a9wzDEwc2a5z
157dcfIhFVBRGsfo5bYrI5/rGUNrWUZrd5ub19Xz9GeBbejc1bvr7Qgrox4rhTm7Z1nVUVrZ6yoqF1RwdnZNnRuae3MUtsV4dqZkK6mo+MFQRAEQVh5JkzwZV1GjPCWV6V8zOv113vButFGPknRf/4D114LTz8Nt9/ukzr94x/dPfoVY/Zsv5440WcFfvttL96nTfPuwZtuunzZIKHnI6K1h7A6v6B3ZlFtT6x2ZIFs7zqV2ysiQldEGLc3hhW1sq7I9dckXbWydiQ6a+3vipttV4RlV9tXxj24vbmINVYQBEEQ1hz9+8OcOb4MTvLR+vWve9G6yy7wf//nY1tPPx2+973ycU895cVr//49N2HT//6vT0b1xz96q3FiaR0/3s83sTZ/5SvewlpfX34PhN6DiNY+wqp8ua8UDB2Jvc5cc9sTql3drqYz4bwyojXJzNten6647q4qXTl/ZyKuVoxzR2KwK9udidAV3V4R0drZ8YIgCIIgrBpNTW1fT50K8+f7GFat4bDDvGit5tBD/efyI48EDBu2dsbaVfL58ph33hkefdTXooXlSwYBrLfe2hubsHoR0doDWJNfzttLsFQtUJN9tayt1duVr1fGxXdFRWvldi3RtipxtbX6rOr96IrldlVjWttLzNXefW3vmitrqe1KW0fn7uoYRbgKgiAIwppj+PDy9vrrezfi2bNh7729ZfLYY32bc4o776xn5529dXaffbyrcXdz333l7aVLYYcd/PaoUX4R+g4iWvsQiRioFdda2aeztmo6czeudXxyTGfJoGplE65u6+i46u3O2itrybbXd1XdhVdHIqeVEXyVx66qJbajttUlXDtCxKogCIIgrH1mzvRuwmec4a2vZ58NH3zg2/7ylzq22sqX0tlgA5g3r1wvdm1w5ZW+Hu2ll8IXvwh//jNMn+7b9t3Xl/V56CH4zGfgssvW3riEtYOI1j5GrRjVjrLwdiSwaonO9s5Tfa72zt+VxE5dae+qhbZ6f3vjrdy3pkVrZ+2rIi5X5by19nfU3tk4VlZ4V9LZ8ykIgiAIwupjm228MEx4+21obob113fMmxfyve/5z+X//hemTIETT4Tnn4cbb4S//Q1eftm76TY3w4cfwkEHrVr8qLU+gdLSpd4CbK2Pxa3mRz/yiZeyWairk5jVvoiI1l7CinxBryUe2hNjK2vxqrW/K2JtVQVd4t67soIlOb69c6yNpEyVY1gZlFKd1nrt6nk6auusvav1Zlfm/F1FhKsgCIIgrDlSKRg0yFs2b7kFliwpf+7+859+SdhvP1i0CAYO9KI1jr119FvfWv68v/wlnH8+3H23F5u1cA5eeQUefLB2+xZbwO67w9ix5XPU16/UNIVewKp/8xWEFSARK7WWVW3v6tLZ+LrzPVgdc+jqGFZljIIgCIIgrDscf3xlqJV3vz35ZNh883KfRYv8evFiL1gBTjutXIIGvBBtbfWC9ZNPylbd6t/z777bi9/99mu7/+yzYdkyf+zrr8PVV9cWxULfQyytwlqjK2JnTbuDaq1X2dq7qqyO86/qe9HdxwuCIAiC0HvYfffy9kYbwZln+m3nwBjYYw944gm/b8wYvwwYAHfcASedBI89BpdcAlddBR9/XD7XXXd5a+phh0G/fnDzzZDLwf77+/alS8t9N9jAn6ux0S/CuoWI1nWI7hYaqyOr7uqgo2us7ZqtK8PqsHauqsV5dVitu/t5FARBEAShaygFv/3tIs46ayC/+Y1qsz8M4dxz4ctfhssvh6OO8m0ffeQTI82e7UXsRx+1PafWXvBut11537hx3pJbzaOPwq67rv55Cb0HEa3CWqO3iJQ1Pc7eYGntLfdKEARBEIS1wwEH5Jk+3aH18t8Rpk4tuwcnjBjhY1e/8Q0vWAcMgF/8wmf9ffll+MEP4IQT2h7T0uLXU6bAppv68jrQftyrsO4gonUdoa+IkFWdR3e7Bq+ta3TG6kjk1BkijAVBEARh3eaoo7xr8V/+4svSbLZZ2RIL8MYb8POf+6RKr7wCjz8Ob74JRx/tXYNfftkfX1fXXTMQegoiWoW1SncLlc6u3xuyB68uuvteCIIgCILQ9xk9Gr797dptl10GO+4IkyZ5N+M99vALwNCh8Nxza2+cQs9GRKuwTtETrJwiFgVBEARBEHz86pFHdvcohN6AiFZhtdEbMtKKaF17rCvzFARBEARBENYsUqdVEARBEARBEARB6LGIaBUEQRAEQRAEQRB6LOIeLKxTiMuqIAiCIAiCIPQuxNIqCIIgCIIgCIIg9FjWaUtrZVKelqSasSAI6xyV//57QrIuQRAEQRAEocw6LVpbW1tL28OHD+/GkQiC0FNobW2lsbGxu4ch9AKSHziWLl262s5prWXZsmXU1dWhdd92hpK59k1krn0TmWvfpLvnmnx+dsVgsE6LVkEQBEFYWZYtWwbAhhtu2M0jEQRBEITey7JlyxgwYECHfZRbh33hrLV88sknADQ0NEiSHkFYR3HOlTwvhgwZ0ud/WRVWD9ZaPvjgA5qamlbb58fSpUvZcMMNee+99+jfv/9qOWdPRebaN5G59k1krn2T7p6rc45ly5YxatSoTr97rdOWVq01w4YN6+5hCILQAxCXYGFF0VqzwQYbrJFz9+/fv89/WUqQufZNZK59E5lr36Q759qZhTVBzAmCIAiCIAiCIAhCj0VEqyAIgiAIgiAIgtBjEdEqCIIgCD2ETCbDueeeSyaT6e6hrHFkrn0TmWvfRObaN+lNc12nEzEJgiAIgiAIgiAIPRuxtAqCIAiCIAiCIAg9FhGtgiAIgiAIgiAIQo9FRKsgCIIgCIIgCILQYxHRKgiCIAg9gCuvvJKNN96Yuro6Jk2axD//+c/uHtIq86Mf/QilVJtlyy23LLXncjlOPvlk1ltvPRobGzn00EP56KOPunHEXeexxx7jgAMOYNSoUSiluPPOO9u0O+c455xzGDlyJPX19UydOpU333yzTZ9PP/2UI488kv79+zNw4ECmT59Oc3PzWpxF1+lsvsccc8xy93qfffZp06c3zPfiiy9mp512oqmpiWHDhnHQQQcxZ86cNn268ty+++677LfffjQ0NDBs2DDOPPNM4jhem1PplK7Mdffdd1/uvp544olt+vSGuV511VWMHTu2VI908uTJ3HPPPaX2vnJPofO59tZ7KqJVEARBELqZW265hdNPP51zzz2X5557jnHjxjFt2jQWLFjQ3UNbZbbZZhs+/PDD0vLEE0+U2r7zne/wt7/9jdtuu41HH32UDz74gEMOOaQbR9t1WlpaGDduHFdeeWXN9ssuu4xf/OIXXH311TzzzDP069ePadOmkcvlSn2OPPJIXn31VR544AHuuusuHnvsMU444YS1NYUVorP5Auyzzz5t7vVNN93Upr03zPfRRx/l5JNP5umnn+aBBx4giiL23ntvWlpaSn06e26NMey3334UCgX+8Y9/8Ic//IGZM2dyzjnndMeU2qUrcwU4/vjj29zXyy67rNTWW+a6wQYbcMkll/Dss88ye/ZsPv/5z3PggQfy
6quvAn3nnkLnc4Veek+dIAiCIAjdysSJE93JJ59cem2McaNGjXIXX3xxN45q1Tn33HPduHHjarYtXrzYpVIpd9ttt5X2vf766w5wTz311Foa4eoBcHfccUfptbXWjRgxwv3kJz8p7Vu8eLHLZDLupptucs4599prrznA/etf/yr1ueeee5xSyr3//vtrbewrQ/V8nXPu6KOPdgceeGC7x/TW+S5YsMAB7tFHH3XOde25/fvf/+601m7+/PmlPldddZXr37+/y+fza3cCK0D1XJ1zbrfddnOnnXZau8f01rk659ygQYPctdde26fvaUIyV+d67z0VS6sgCIIgdCOFQoFnn32WqVOnlvZprZk6dSpPPfVUN45s9fDmm28yatQoNtlkE4488kjeffddAJ599lmiKGoz7y233JLRo0f3+nnPmzeP+fPnt5nbgAEDmDRpUmluTz31FAMHDmTChAmlPlOnTkVrzTPPPLPWx7w6mDVrFsOGDWOLLbbgpJNOYuHChaW23jrfJUuWADB48GCga8/tU089xXbbbcfw4cNLfaZNm8bSpUvbWLt6GtVzTbjhhhsYMmQI2267LTNmzKC1tbXU1hvnaozh5ptvpqWlhcmTJ/fpe1o914TeeE/DbruyIAiCIAh88sknGGPafEEAGD58OG+88UY3jWr1MGnSJGbOnMkWW2zBhx9+yHnnncfnPvc5XnnlFebPn086nWbgwIFtjhk+fDjz58/vngGvJpLx17qnSdv8+fMZNmxYm/YwDBk8eHCvnP8+++zDIYccwpgxY5g7dy4/+MEP2HfffXnqqacIgqBXztday//7f/+PKVOmsO222wJ06bmdP39+zXuftPVEas0V4Ktf/SobbbQRo0aN4qWXXuL73/8+c+bM4fbbbwd611xffvllJk+eTC6Xo7GxkTvuuIOtt96aF154oc/d0/bmCr33nopoFQRBEARhjbDvvvuWtseOHcukSZPYaKONuPXWW6mvr+/GkQmrm6985Sul7e22246xY8ey6aabMmvWLPbcc89uHNnKc/LJJ/PKK6+0icPuq7Q318qY4+22246RI0ey5557MnfuXDbddNO1PcxVYosttuCFF15gyZIl/PnPf+boo4/m0Ucf7e5hrRHam+vWW2/da++puAcLgiAIQjcyZMgQgiBYLlPlRx99xIgRI7ppVGuGgQMH8pnPfIa33nqLESNGUCgUWLx4cZs+fWHeyfg7uqcjRoxYLtFWHMd8+umnvX7+AJtssglDhgzhrbfeAnrffE855RTuuusuHnnkETbYYIPS/q48tyNGjKh575O2nkZ7c63FpEmTANrc194y13Q6zWabbcb48eO5+OKLGTduHFdccUWfvKftzbUWveWeimgVBEEQhG4knU4zfvx4HnroodI+ay0PPfRQmxikvkBzczNz585l5MiRjB8/nlQq1Wbec+bM4d133+318x4zZgwjRoxoM7elS5fyzDPPlOY2efJkFi9ezLPPPlvq8/DDD2OtLX2J7M3897//ZeHChYwcORLoPfN1znHKKadwxx138PDDDzNmzJg27V15bidPnszLL7/cRqQ/8MAD9O/fv+Si2RPobK61eOGFFwDa3NfeMNdaWGvJ5/N96p62RzLXWvSae9ptKaAEQRAEQXDOOXfzzTe7TCbjZs6c6V577TV3wgknuIEDB7bJ3tgb+e53v+tmzZrl5s2b55588kk3depUN2TIELdgwQLnnHMnnniiGz16tHv44Yfd7Nmz3eTJk93kyZO7edRdY9myZe755593zz//vAPcz3/+c/f888+7d955xznn3CWXXOIGDhzo/vKXv7iXXnrJHXjggW7MmDEum82WzrHPPvu4HXbYwT3zzDPuiSeecJtvvrk74ogjumtKHdLRfJctW+bOOOMM99RTT7l58+a5Bx980O24445u8803d7lcrnSO3jDfk046yQ0YMMDNmjXLffjhh6WltbW11Kez5zaOY7ftttu6vffe273wwgvu3nvvdUOHDnUzZszojim1S2dzfeutt9z555/vZs+e7ebNm+f+8pe/uE022cTtuuuupXP0lrmeddZZ7tFHH3Xz5s1zL730kjvrrLOcUsrdf//9zrm+c0+d63iuvfmeimgVBEEQhB7AL3/5Szd69GiXTqfdxIkT3dNPP93dQ1plDj/8cDdy5EiXTqfd+uuv7w4//HD31ltvldqz2az71re+5QYNGuQaGhrcwQcf7D788MNuHHHXeeSRRxyw3HL00Uc753zZmx/+8Idu+PDhLpPJuD333NPNmTOnzTkWLlzojjjiCNfY2Oj69+/vvvGNb7hly5Z1w2w6p6P5tra2ur333tsNHTrUpVIpt9FGG7njjz9+uR9desN8a80RcL///e9Lfbry3L799ttu3333dfX19W7IkCHuu9/9rouiaC3PpmM6m+u7777rdt11Vzd48GCXyWTcZptt5s4880y3ZMmSNufpDXM99thj3UYbbeTS6bQbOnSo23PPPUuC1bm+c0+d63iuvfmeKuecW3t2XUEQBEEQBEEQBEHoOhLTKgiCIAiCIAiCIPRYRLQKgiAIgiAIgiAIPRYRrYIgCIIgCIIgCEKPRUSrIAiCIAiCIAiC0GMR0SoIgiAIgiAIgiD0WES0CoIgCIIgCIIgCD0WEa2CIAiCIAiCIAhCj0VEqyAIgiAIgiAIgtBjEdEqCIIgCIIgrDWOOeYYDjrooFU+z5w5cxgxYgTLli1rt8/MmTMZOHDgKl+rJ3D11VdzwAEHdPcwBKFbENEqCIIgCIIgAPDUU08RBAH77bdfdw+lU2bMmMG3v/1tmpqaunsoa4Vjjz2W5557jscff7y7hyIIax0RrYIgCIIgCAIA1113Hd/+9rd57LHH+OCDD7p7OO3y7rvvctddd3HMMcd091AAiKJojV8jnU7z1a9+lV/84hdr/FqC0NMQ0SoIgiAIgiDQ3NzMLbfcwkknncR+++3HzJkz27TPmjULpRQPPfQQEyZMoKGhgZ133pk5c+a06XfBBRcwbNgwmpqaOO644zjrrLPYfvvt272utZaLL76YMWPGUF9fz7hx4/jzn//c4VhvvfVWxo0bx/rrr99m/8yZMxk9ejQNDQ0cfPDBLFy4cLlj//KXv7DjjjtSV1fHJptswnnnnUccx6X2N954g1122YW6ujq23nprHnzwQZRS3HnnnQC8/fbbKKW45ZZb2G233airq+OGG24A4Nprr2Wrrbairq6OLbfckl//+tdtrv3ee+9x2GGHMXDgQAYPHsyBBx7I22+/3eY9njhxIv369WPgwIFMmTKFd955p9R+wAEH8Ne//pVsNtvh+yMIfQ0RrYIgCIIgCAK33norW265JVtssQVf+9rX+N3vfodzbrl+//M//8PPfvYzZs+eTRiGHHvssaW2G264gQsvvJBLL72UZ599ltGjR3PVVVd1eN2LL76Y66+/nquvvppXX32V73znO3zta1/j0UcfbfeYxx9/nAkTJrTZ98wzzzB9+nROOeUUXnjhBfbYYw8uuOCC5Y476qijOO2003j
ttdf4zW9+w8yZM7nwwgsBMMZw0EEH0dDQwDPPPMNvf/tb/ud//qfmGM466yxOO+00Xn/9daZNm8YNN9zAOeecw4UXXsjrr7/ORRddxA9/+EP+8Ic/AN4aO23aNJqamnj88cd58sknaWxsZJ999qFQKBDHMQcddBC77bYbL730Ek899RQnnHACSqnSNSdMmEAcxzzzzDMdvqeC0OdwgiAIgiAIwjrPzjvv7C6//HLnnHNRFLkhQ4a4Rx55pNT+yCOPOMA9+OCDpX133323A1w2m3XOOTdp0iR38skntznvlClT3Lhx40qvjz76aHfggQc655zL5XKuoaHB/eMf/2hzzPTp090RRxzR7ljHjRvnzj///Db7jjjiCPeFL3yhzb7DDz/cDRgwoPR6zz33dBdddFGbPn/84x/dyJEjnXPO3XPPPS4MQ/fhhx+W2h944AEHuDvuuMM559y8efMcUHqvEjbddFN34403ttn34x//2E2ePLl0nS222MJZa0vt+Xze1dfXu/vuu88tXLjQAW7WrFntzts55wYNGuRmzpzZYR9B6GuIpVUQBEEQBGEdZ86cOfzzn//kiCOOACAMQw4//HCuu+665fqOHTu2tD1y5EgAFixYUDrPxIkT2/Svfl3JW2+9RWtrK3vttReNjY2l5frrr2fu3LntHpfNZqmrq2uz7/XXX2fSpElt9k2ePLnN6xdffJHzzz+/zbWOP/54PvzwQ1pbW5kzZw4bbrghI0aM6HT8lZbelpYW5s6dy/Tp09uc+4ILLijN48UXX+Stt96iqamp1D548GByuRxz585l8ODBHHPMMUybNo0DDjiAK664gg8//HC569bX19Pa2trueyMIfZGwuwcgCIIgCIIgdC/XXXcdcRwzatSo0j7nHJlMhl/96lcMGDCgtD+VSpW2E9dVa+1KXbe5uRmAu+++e7n41Ewm0+5xQ4YMYdGiRSt1vfPOO49DDjlkubZqEdwZ/fr1a3NegGuuuWY54RwEQanP+PHjS/GvlQwdOhSA3//+95x66qnce++93HLLLZx99tk88MADfPazny31/fTTT0v9BWFdQUSrIAiCIAjCOkwcx1x//fX87Gc/Y++9927TdtBBB3HTTTdx4okndulcW2yxBf/617846qijSvv+9a9/tdt/6623JpPJ8O6777Lbbrt1ecw77LADr732Wpt9W2211XKxnk8//XSb1zvuuCNz5sxhs802a3f87733Hh999BHDhw/vdPwJw4cPZ9SoUfznP//hyCOPrNlnxx135JZbbmHYsGH079+/w7ntsMMOzJgxg8mTJ3PjjTeWROvcuXPJ5XLssMMOnY5JEPoSIloFQRAEQRDWYe666y4WLVrE9OnT21hUAQ499FCuu+66LovWb3/72xx//PFMmDCBnXfemVtuuYWXXnqJTTbZpGb/pqYmzjjjDL7zne9grWWXXXZhyZIlPPnkk/Tv35+jjz665nHTpk3juOOOwxhTsmSeeuqpTJkyhZ/+9KcceOCB3Hfffdx7771tjjvnnHPYf//9GT16NF/60pfQWvPiiy/yyiuvcMEFF7DXXnux6aabcvTRR3PZZZexbNkyzj77bIA2CZFqcd5553HqqacyYMAA9tlnH/L5PLNnz2bRokWcfvrpHHnkkfzkJz/hwAMP5Pzzz2eDDTbgnXfe4fbbb+d73/seURTx29/+li9+8YuMGjWKOXPm8Oabb7b5AeDxxx9nk002YdNNN+3S/RCEvoLEtAqCIAiCIKzDXHfddUydOnU5wQpetM6ePZuXXnqpS+c68sgjmTFjBmeccQY77rgj8+bN45hjjunQ9fbHP/4xP/zhD7n44ovZaqut2Geffbj77rsZM2ZMu8fsu+++hGHIgw8+WNr32c9+lmuuuYYrrriCcePGcf/995cEZ8K0adO46667uP/++9lpp5347Gc/y//+7/+y0UYbAd6V984776S5uZmddtqJ4447rpQ9uDP34eOOO45rr72W3//+92y33XbstttuzJw5szSPhoYGHnvsMUaPHs0hhxzCVlttxfTp08nlcvTv35+GhgbeeOMNDj30UD7zmc9wwgkncPLJJ/PNb36zdI2bbrqJ448/vsNxCEJfRDlXI5e5IAiCIAiCIKwG9tprL0aMGMEf//jH1XreK6+8kr/+9a/cd999q/W81Tz55JPssssuvPXWW91q4Xz11Vf5/Oc/z7///e+aPzAIQl9G3IMFQRAEQRCE1UJraytXX30106ZNIwgCbrrpJh588EEeeOCB1X6tb37zmyxevJhly5bR1NS02s57xx130NjYyOabb85bb73FaaedxpQpU7rdJffDDz/k+uuvF8EqrJOIpVUQBEEQBEFYLWSzWQ444ACef/55crkcW2yxBWeffXbNbL09leuvv54LLriAd999lyFDhjB16lR+9rOfsd5663X30ARhnUVEqyAIgiAIgiAIgtBjkURMgiAIgiAIgiAIQo9FRKsgCIIgCIIgCILQYxHRKgiCIAiCIAiCIPRYRLQKgiAIgiAIgiAIPRYRrYIgCIIgCIIgCEKPRUSrIAiCIAiCIAiC0GMR0SoIgiAIgiAIgiD0WES0CoIgCIIgCIIgCD0WEa2CIAiCIAiCIAhCj+X/A6bx9kQDrwlUAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# image_paths = ['./test_demo/lion.jpg']\n", + "# image_paths = ['./test_demo/bowl.jpg']\n", + "# save_path = './test_demo_output/bowl.jpg'\n", + "# image_paths = ['./test_demo/coin2.jpg', './test_demo/coin3.jpg']\n", + "# image_paths = ['./test_demo/coin2.jpg']\n", + "# save_path = './test_demo_output/coin.jpg'\n", + "\n", + "# image_paths = ['./test_demo/F22-0.jpg', './test_demo/F22-1.jpg']\n", + "# save_path = './test_demo_output/F22.jpg'\n", + "\n", + "# image_paths = ['./test_demo/F22-1.jpg']\n", + "# save_path = './test_demo_output/F22.jpg'\n", + "\n", + "# image_paths = ['./test_demo/handbag6.jpg']\n", + "# save_path = './test_demo_output/handbag.jpg'\n", + "\n", + "# image_paths = ['./test_demo/bottle.jpg']\n", + "# save_path = './test_demo_output/bottle.jpg'\n", + "\n", + "image_paths = ['./test_demo/apple.jpg']\n", + "save_path = './test_demo_output/apple.jpg'\n", + "\n", + "# image_paths = ['./test_demo/pot.jpg', './test_demo/pot2.jpg']\n", + "# save_path = './test_demo_output/pot.jpg'\n", + "\n", + "if len(image_paths) == 1:\n", + " ori_az, ori_el, ori_ro, alpha_pred, pose_enc = ori_single(image_paths[0], True, False)\n", + " titles = [f'azi:{ori_az} ele:{ori_el} rot:{ori_ro} alpha:{alpha_pred}']\n", + "else:\n", + " ori_az, ori_el, ori_ro, alpha_pred, rel_az, rel_el, rel_ro, pose_enc = ref_single(image_paths[0], image_paths[1], False, False)\n", + " titles = [f'azi:{ori_az} ele:{ori_el} rot:{ori_ro} alpha:{alpha_pred}', f'azi:{rel_az} ele:{rel_el} rot:{rel_ro}']\n", + "\n", + "vis_distribution(image_paths, F.sigmoid(pose_enc), titles, save_path)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "OriAnyV2", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/paths.py b/paths.py new file mode 100644 index 0000000000000000000000000000000000000000..4b50f6c4299fd4229296c7fcab7572e17f868047 --- /dev/null +++ b/paths.py @@ -0,0 +1,16 @@ +DINO_SMALL = "facebook/dinov2-small" +DINO_BASE = "facebook/dinov2-base" +DINO_LARGE = "facebook/dinov2-large" +DINO_GIANT = "facebook/dinov2-giant" + +VGGT_1B = "facebook/VGGT-1B" + +ORIANY_V2 = "Viglong/OriAnyV2_ckpt" + +REMOTE_CKPT_PATH = "demo_ckpts/acc8mask20lowlr.pt" + + +RENDER_FILE = "assets/axis_render.blend" +REF_AXIS_IMAGE = "assets/axis_ref.png" +TGT_AXIS_IMAGE = "assets/axis_tgt.png" + diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..26e63b739c8ed1d32809f07d4687b572058009b0 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,22 @@ +matplotlib +pydantic==2.10.6 +gradio==5.9.0 +onnxruntime +rembg +accelerate==1.8.1 +numpy>=1.24 +einops +pandas +pillow +huggingface_hub>=0.23 +pytorch-lightning +scipy +torch +torchmetrics +torchvision +tqdm +transformers +scikit-learn +opencv-python +timm +bpy==4.2 diff --git a/vggt/heads/camera_head.py b/vggt/heads/camera_head.py new file mode 100644 index 0000000000000000000000000000000000000000..176d76fb5baeb3a42fa3675a1d1fb14010f2904d --- /dev/null +++ b/vggt/heads/camera_head.py @@ -0,0 +1,162 @@ +# Copyright (c) Meta 
Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +import numpy as np + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from vggt.layers import Mlp +from vggt.layers.block import Block +from vggt.heads.head_act import activate_pose + + +class CameraHead(nn.Module): + """ + CameraHead predicts camera parameters from token representations using iterative refinement. + + It applies a series of transformer blocks (the "trunk") to dedicated camera tokens. + """ + + def __init__( + self, + dim_in: int = 2048, + trunk_depth: int = 4, + pose_encoding_type: str = "absT_quaR_FoV", + num_heads: int = 16, + mlp_ratio: int = 4, + init_values: float = 0.01, + trans_act: str = "linear", + quat_act: str = "linear", + fl_act: str = "relu", # Field of view activations: ensures FOV values are positive. + ): + super().__init__() + + if pose_encoding_type == "absT_quaR_FoV": + self.target_dim = 9 + else: + raise ValueError(f"Unsupported camera encoding type: {pose_encoding_type}") + + self.trans_act = trans_act + self.quat_act = quat_act + self.fl_act = fl_act + self.trunk_depth = trunk_depth + + # Build the trunk using a sequence of transformer blocks. + self.trunk = nn.Sequential( + *[ + Block( + dim=dim_in, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + init_values=init_values, + ) + for _ in range(trunk_depth) + ] + ) + + # Normalizations for camera token and trunk output. + self.token_norm = nn.LayerNorm(dim_in) + self.trunk_norm = nn.LayerNorm(dim_in) + + # Learnable empty camera pose token. + self.empty_pose_tokens = nn.Parameter(torch.zeros(1, 1, self.target_dim)) + self.embed_pose = nn.Linear(self.target_dim, dim_in) + + # Module for producing modulation parameters: shift, scale, and a gate. + self.poseLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim_in, 3 * dim_in, bias=True)) + + # Adaptive layer normalization without affine parameters. + self.adaln_norm = nn.LayerNorm(dim_in, elementwise_affine=False, eps=1e-6) + self.pose_branch = Mlp( + in_features=dim_in, + hidden_features=dim_in // 2, + out_features=self.target_dim, + drop=0, + ) + + def forward(self, aggregated_tokens_list: list, num_iterations: int = 4) -> list: + """ + Forward pass to predict camera parameters. + + Args: + aggregated_tokens_list (list): List of token tensors from the network; + the last tensor is used for prediction. + num_iterations (int, optional): Number of iterative refinement steps. Defaults to 4. + + Returns: + list: A list of predicted camera encodings (post-activation) from each iteration. + """ + # Use tokens from the last block for camera prediction. + tokens = aggregated_tokens_list[-1] + + # Extract the camera tokens + pose_tokens = tokens[:, :, 0] + pose_tokens = self.token_norm(pose_tokens) + + pred_pose_enc_list = self.trunk_fn(pose_tokens, num_iterations) + return pred_pose_enc_list + + def trunk_fn(self, pose_tokens: torch.Tensor, num_iterations: int) -> list: + """ + Iteratively refine camera pose predictions. + + Args: + pose_tokens (torch.Tensor): Normalized camera tokens with shape [B, 1, C]. + num_iterations (int): Number of refinement iterations. + + Returns: + list: List of activated camera encodings from each iteration. + """ + B, S, C = pose_tokens.shape # S is expected to be 1. + pred_pose_enc = None + pred_pose_enc_list = [] + + for _ in range(num_iterations): + # Use a learned empty pose for the first iteration. 
+ if pred_pose_enc is None: + module_input = self.embed_pose(self.empty_pose_tokens.expand(B, S, -1)) + else: + # Detach the previous prediction to avoid backprop through time. + pred_pose_enc = pred_pose_enc.detach() + module_input = self.embed_pose(pred_pose_enc) + + # Generate modulation parameters and split them into shift, scale, and gate components. + shift_msa, scale_msa, gate_msa = self.poseLN_modulation(module_input).chunk(3, dim=-1) + + # Adaptive layer normalization and modulation. + pose_tokens_modulated = gate_msa * modulate(self.adaln_norm(pose_tokens), shift_msa, scale_msa) + pose_tokens_modulated = pose_tokens_modulated + pose_tokens + + pose_tokens_modulated = self.trunk(pose_tokens_modulated) + # Compute the delta update for the pose encoding. + pred_pose_enc_delta = self.pose_branch(self.trunk_norm(pose_tokens_modulated)) + + if pred_pose_enc is None: + pred_pose_enc = pred_pose_enc_delta + else: + pred_pose_enc = pred_pose_enc + pred_pose_enc_delta + + # Apply final activation functions for translation, quaternion, and field-of-view. + activated_pose = activate_pose( + pred_pose_enc, + trans_act=self.trans_act, + quat_act=self.quat_act, + fl_act=self.fl_act, + ) + pred_pose_enc_list.append(activated_pose) + + return pred_pose_enc_list + + +def modulate(x: torch.Tensor, shift: torch.Tensor, scale: torch.Tensor) -> torch.Tensor: + """ + Modulate the input tensor using scaling and shifting parameters. + """ + # modified from https://github.com/facebookresearch/DiT/blob/796c29e532f47bba17c5b9c5eb39b9354b8b7c64/models.py#L19 + return x * (1 + scale) + shift diff --git a/vggt/heads/dpt_head.py b/vggt/heads/dpt_head.py new file mode 100644 index 0000000000000000000000000000000000000000..fa5cf6e1cee2eb7cb2ad9538d0d168f97a590382 --- /dev/null +++ b/vggt/heads/dpt_head.py @@ -0,0 +1,497 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +# Inspired by https://github.com/DepthAnything/Depth-Anything-V2 + + +import os +from typing import List, Dict, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from .head_act import activate_head +from .utils import create_uv_grid, position_grid_to_embed + + +class DPTHead(nn.Module): + """ + DPT Head for dense prediction tasks. + + This implementation follows the architecture described in "Vision Transformers for Dense Prediction" + (https://arxiv.org/abs/2103.13413). The DPT head processes features from a vision transformer + backbone and produces dense predictions by fusing multi-scale features. + + Args: + dim_in (int): Input dimension (channels). + patch_size (int, optional): Patch size. Default is 14. + output_dim (int, optional): Number of output channels. Default is 4. + activation (str, optional): Activation type. Default is "inv_log". + conf_activation (str, optional): Confidence activation type. Default is "expp1". + features (int, optional): Feature channels for intermediate representations. Default is 256. + out_channels (List[int], optional): Output channels for each intermediate layer. + intermediate_layer_idx (List[int], optional): Indices of layers from aggregated tokens used for DPT. + pos_embed (bool, optional): Whether to use positional embedding. Default is True. + feature_only (bool, optional): If True, return features only without the last several layers and activation head. Default is False. 
+ down_ratio (int, optional): Downscaling factor for the output resolution. Default is 1. + """ + + def __init__( + self, + dim_in: int, + patch_size: int = 14, + output_dim: int = 4, + activation: str = "inv_log", + conf_activation: str = "expp1", + features: int = 256, + out_channels: List[int] = [256, 512, 1024, 1024], + intermediate_layer_idx: List[int] = [4, 11, 17, 23], + pos_embed: bool = True, + feature_only: bool = False, + down_ratio: int = 1, + ) -> None: + super(DPTHead, self).__init__() + self.patch_size = patch_size + self.activation = activation + self.conf_activation = conf_activation + self.pos_embed = pos_embed + self.feature_only = feature_only + self.down_ratio = down_ratio + self.intermediate_layer_idx = intermediate_layer_idx + + self.norm = nn.LayerNorm(dim_in) + + # Projection layers for each output channel from tokens. + self.projects = nn.ModuleList( + [ + nn.Conv2d( + in_channels=dim_in, + out_channels=oc, + kernel_size=1, + stride=1, + padding=0, + ) + for oc in out_channels + ] + ) + + # Resize layers for upsampling feature maps. + self.resize_layers = nn.ModuleList( + [ + nn.ConvTranspose2d( + in_channels=out_channels[0], out_channels=out_channels[0], kernel_size=4, stride=4, padding=0 + ), + nn.ConvTranspose2d( + in_channels=out_channels[1], out_channels=out_channels[1], kernel_size=2, stride=2, padding=0 + ), + nn.Identity(), + nn.Conv2d( + in_channels=out_channels[3], out_channels=out_channels[3], kernel_size=3, stride=2, padding=1 + ), + ] + ) + + self.scratch = _make_scratch( + out_channels, + features, + expand=False, + ) + + # Attach additional modules to scratch. + self.scratch.stem_transpose = None + self.scratch.refinenet1 = _make_fusion_block(features) + self.scratch.refinenet2 = _make_fusion_block(features) + self.scratch.refinenet3 = _make_fusion_block(features) + self.scratch.refinenet4 = _make_fusion_block(features, has_residual=False) + + head_features_1 = features + head_features_2 = 32 + + if feature_only: + self.scratch.output_conv1 = nn.Conv2d(head_features_1, head_features_1, kernel_size=3, stride=1, padding=1) + else: + self.scratch.output_conv1 = nn.Conv2d( + head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1 + ) + conv2_in_channels = head_features_1 // 2 + + self.scratch.output_conv2 = nn.Sequential( + nn.Conv2d(conv2_in_channels, head_features_2, kernel_size=3, stride=1, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(head_features_2, output_dim, kernel_size=1, stride=1, padding=0), + ) + + def forward( + self, + aggregated_tokens_list: List[torch.Tensor], + images: torch.Tensor, + patch_start_idx: int, + frames_chunk_size: int = 8, + ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + """ + Forward pass through the DPT head, supports processing by chunking frames. + Args: + aggregated_tokens_list (List[Tensor]): List of token tensors from different transformer layers. + images (Tensor): Input images with shape [B, S, 3, H, W], in range [0, 1]. + patch_start_idx (int): Starting index for patch tokens in the token sequence. + Used to separate patch tokens from other tokens (e.g., camera or register tokens). + frames_chunk_size (int, optional): Number of frames to process in each chunk. + If None or larger than S, all frames are processed at once. Default: 8. 
+ + Returns: + Tensor or Tuple[Tensor, Tensor]: + - If feature_only=True: Feature maps with shape [B, S, C, H, W] + - Otherwise: Tuple of (predictions, confidence) both with shape [B, S, 1, H, W] + """ + B, S, _, H, W = images.shape + + # If frames_chunk_size is not specified or greater than S, process all frames at once + if frames_chunk_size is None or frames_chunk_size >= S: + return self._forward_impl(aggregated_tokens_list, images, patch_start_idx) + + # Otherwise, process frames in chunks to manage memory usage + assert frames_chunk_size > 0 + + # Process frames in batches + all_preds = [] + all_conf = [] + + for frames_start_idx in range(0, S, frames_chunk_size): + frames_end_idx = min(frames_start_idx + frames_chunk_size, S) + + # Process batch of frames + if self.feature_only: + chunk_output = self._forward_impl( + aggregated_tokens_list, images, patch_start_idx, frames_start_idx, frames_end_idx + ) + all_preds.append(chunk_output) + else: + chunk_preds, chunk_conf = self._forward_impl( + aggregated_tokens_list, images, patch_start_idx, frames_start_idx, frames_end_idx + ) + all_preds.append(chunk_preds) + all_conf.append(chunk_conf) + + # Concatenate results along the sequence dimension + if self.feature_only: + return torch.cat(all_preds, dim=1) + else: + return torch.cat(all_preds, dim=1), torch.cat(all_conf, dim=1) + + def _forward_impl( + self, + aggregated_tokens_list: List[torch.Tensor], + images: torch.Tensor, + patch_start_idx: int, + frames_start_idx: int = None, + frames_end_idx: int = None, + ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + """ + Implementation of the forward pass through the DPT head. + + This method processes a specific chunk of frames from the sequence. + + Args: + aggregated_tokens_list (List[Tensor]): List of token tensors from different transformer layers. + images (Tensor): Input images with shape [B, S, 3, H, W]. + patch_start_idx (int): Starting index for patch tokens. + frames_start_idx (int, optional): Starting index for frames to process. + frames_end_idx (int, optional): Ending index for frames to process. + + Returns: + Tensor or Tuple[Tensor, Tensor]: Feature maps or (predictions, confidence). + """ + if frames_start_idx is not None and frames_end_idx is not None: + images = images[:, frames_start_idx:frames_end_idx].contiguous() + + B, S, _, H, W = images.shape + + patch_h, patch_w = H // self.patch_size, W // self.patch_size + + out = [] + dpt_idx = 0 + + for layer_idx in self.intermediate_layer_idx: + x = aggregated_tokens_list[layer_idx][:, :, patch_start_idx:] + + # Select frames if processing a chunk + if frames_start_idx is not None and frames_end_idx is not None: + x = x[:, frames_start_idx:frames_end_idx] + + x = x.view(B * S, -1, x.shape[-1]) + + x = self.norm(x) + + x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w)) + + x = self.projects[dpt_idx](x) + if self.pos_embed: + x = self._apply_pos_embed(x, W, H) + x = self.resize_layers[dpt_idx](x) + + out.append(x) + dpt_idx += 1 + + # Fuse features from multiple layers. + out = self.scratch_forward(out) + # Interpolate fused output to match target image resolution. 
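+        # Added note: custom_interpolate (defined later in this file) behaves like F.interpolate
+        # but chunks very large batches so the element count stays under the backend INT_MAX
+        # limit; with down_ratio > 1 (e.g. 2 for the track head) the upsampled output is
+        # proportionally smaller than the input images.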
+ out = custom_interpolate( + out, + (int(patch_h * self.patch_size / self.down_ratio), int(patch_w * self.patch_size / self.down_ratio)), + mode="bilinear", + align_corners=True, + ) + + if self.pos_embed: + out = self._apply_pos_embed(out, W, H) + + if self.feature_only: + return out.view(B, S, *out.shape[1:]) + + out = self.scratch.output_conv2(out) + preds, conf = activate_head(out, activation=self.activation, conf_activation=self.conf_activation) + + preds = preds.view(B, S, *preds.shape[1:]) + conf = conf.view(B, S, *conf.shape[1:]) + return preds, conf + + def _apply_pos_embed(self, x: torch.Tensor, W: int, H: int, ratio: float = 0.1) -> torch.Tensor: + """ + Apply positional embedding to tensor x. + """ + patch_w = x.shape[-1] + patch_h = x.shape[-2] + pos_embed = create_uv_grid(patch_w, patch_h, aspect_ratio=W / H, dtype=x.dtype, device=x.device) + pos_embed = position_grid_to_embed(pos_embed, x.shape[1]) + pos_embed = pos_embed * ratio + pos_embed = pos_embed.permute(2, 0, 1)[None].expand(x.shape[0], -1, -1, -1) + return x + pos_embed + + def scratch_forward(self, features: List[torch.Tensor]) -> torch.Tensor: + """ + Forward pass through the fusion blocks. + + Args: + features (List[Tensor]): List of feature maps from different layers. + + Returns: + Tensor: Fused feature map. + """ + layer_1, layer_2, layer_3, layer_4 = features + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + out = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:]) + del layer_4_rn, layer_4 + + out = self.scratch.refinenet3(out, layer_3_rn, size=layer_2_rn.shape[2:]) + del layer_3_rn, layer_3 + + out = self.scratch.refinenet2(out, layer_2_rn, size=layer_1_rn.shape[2:]) + del layer_2_rn, layer_2 + + out = self.scratch.refinenet1(out, layer_1_rn) + del layer_1_rn, layer_1 + + out = self.scratch.output_conv1(out) + return out + + +################################################################################ +# Modules +################################################################################ + + +def _make_fusion_block(features: int, size: int = None, has_residual: bool = True, groups: int = 1) -> nn.Module: + return FeatureFusionBlock( + features, + nn.ReLU(inplace=True), + deconv=False, + bn=False, + expand=False, + align_corners=True, + size=size, + has_residual=has_residual, + groups=groups, + ) + + +def _make_scratch(in_shape: List[int], out_shape: int, groups: int = 1, expand: bool = False) -> nn.Module: + scratch = nn.Module() + out_shape1 = out_shape + out_shape2 = out_shape + out_shape3 = out_shape + if len(in_shape) >= 4: + out_shape4 = out_shape + + if expand: + out_shape1 = out_shape + out_shape2 = out_shape * 2 + out_shape3 = out_shape * 4 + if len(in_shape) >= 4: + out_shape4 = out_shape * 8 + + scratch.layer1_rn = nn.Conv2d( + in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer2_rn = nn.Conv2d( + in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer3_rn = nn.Conv2d( + in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + if len(in_shape) >= 4: + scratch.layer4_rn = nn.Conv2d( + in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + return scratch + + +class ResidualConvUnit(nn.Module): + """Residual convolution module.""" + + def __init__(self, 
features, activation, bn, groups=1): + """Init. + + Args: + features (int): number of features + """ + super().__init__() + + self.bn = bn + self.groups = groups + self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups) + self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups) + + self.norm1 = None + self.norm2 = None + + self.activation = activation + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: output + """ + + out = self.activation(x) + out = self.conv1(out) + if self.norm1 is not None: + out = self.norm1(out) + + out = self.activation(out) + out = self.conv2(out) + if self.norm2 is not None: + out = self.norm2(out) + + return self.skip_add.add(out, x) + + +class FeatureFusionBlock(nn.Module): + """Feature fusion block.""" + + def __init__( + self, + features, + activation, + deconv=False, + bn=False, + expand=False, + align_corners=True, + size=None, + has_residual=True, + groups=1, + ): + """Init. + + Args: + features (int): number of features + """ + super(FeatureFusionBlock, self).__init__() + + self.deconv = deconv + self.align_corners = align_corners + self.groups = groups + self.expand = expand + out_features = features + if self.expand == True: + out_features = features // 2 + + self.out_conv = nn.Conv2d( + features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=self.groups + ) + + if has_residual: + self.resConfUnit1 = ResidualConvUnit(features, activation, bn, groups=self.groups) + + self.has_residual = has_residual + self.resConfUnit2 = ResidualConvUnit(features, activation, bn, groups=self.groups) + + self.skip_add = nn.quantized.FloatFunctional() + self.size = size + + def forward(self, *xs, size=None): + """Forward pass. + + Returns: + tensor: output + """ + output = xs[0] + + if self.has_residual: + res = self.resConfUnit1(xs[1]) + output = self.skip_add.add(output, res) + + output = self.resConfUnit2(output) + + if (size is None) and (self.size is None): + modifier = {"scale_factor": 2} + elif size is None: + modifier = {"size": self.size} + else: + modifier = {"size": size} + + output = custom_interpolate(output, **modifier, mode="bilinear", align_corners=self.align_corners) + output = self.out_conv(output) + + return output + + +def custom_interpolate( + x: torch.Tensor, + size: Tuple[int, int] = None, + scale_factor: float = None, + mode: str = "bilinear", + align_corners: bool = True, +) -> torch.Tensor: + """ + Custom interpolate to avoid INT_MAX issues in nn.functional.interpolate. + """ + if size is None: + size = (int(x.shape[-2] * scale_factor), int(x.shape[-1] * scale_factor)) + + INT_MAX = 1610612736 + + input_elements = size[0] * size[1] * x.shape[0] * x.shape[1] + + if input_elements > INT_MAX: + chunks = torch.chunk(x, chunks=(input_elements // INT_MAX) + 1, dim=0) + interpolated_chunks = [ + nn.functional.interpolate(chunk, size=size, mode=mode, align_corners=align_corners) for chunk in chunks + ] + x = torch.cat(interpolated_chunks, dim=0) + return x.contiguous() + else: + return nn.functional.interpolate(x, size=size, mode=mode, align_corners=align_corners) diff --git a/vggt/heads/head_act.py b/vggt/heads/head_act.py new file mode 100644 index 0000000000000000000000000000000000000000..2dedfcf1180a653dddc99623e60df625e5897489 --- /dev/null +++ b/vggt/heads/head_act.py @@ -0,0 +1,125 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +import torch +import torch.nn.functional as F + + +def activate_pose(pred_pose_enc, trans_act="linear", quat_act="linear", fl_act="linear"): + """ + Activate pose parameters with specified activation functions. + + Args: + pred_pose_enc: Tensor containing encoded pose parameters [translation, quaternion, focal length] + trans_act: Activation type for translation component + quat_act: Activation type for quaternion component + fl_act: Activation type for focal length component + + Returns: + Activated pose parameters tensor + """ + T = pred_pose_enc[..., :3] + quat = pred_pose_enc[..., 3:7] + fl = pred_pose_enc[..., 7:] # or fov + + T = base_pose_act(T, trans_act) + quat = base_pose_act(quat, quat_act) + fl = base_pose_act(fl, fl_act) # or fov + + pred_pose_enc = torch.cat([T, quat, fl], dim=-1) + + return pred_pose_enc + + +def base_pose_act(pose_enc, act_type="linear"): + """ + Apply basic activation function to pose parameters. + + Args: + pose_enc: Tensor containing encoded pose parameters + act_type: Activation type ("linear", "inv_log", "exp", "relu") + + Returns: + Activated pose parameters + """ + if act_type == "linear": + return pose_enc + elif act_type == "inv_log": + return inverse_log_transform(pose_enc) + elif act_type == "exp": + return torch.exp(pose_enc) + elif act_type == "relu": + return F.relu(pose_enc) + else: + raise ValueError(f"Unknown act_type: {act_type}") + + +def activate_head(out, activation="norm_exp", conf_activation="expp1"): + """ + Process network output to extract 3D points and confidence values. + + Args: + out: Network output tensor (B, C, H, W) + activation: Activation type for 3D points + conf_activation: Activation type for confidence values + + Returns: + Tuple of (3D points tensor, confidence tensor) + """ + # Move channels from last dim to the 4th dimension => (B, H, W, C) + fmap = out.permute(0, 2, 3, 1) # B,H,W,C expected + + # Split into xyz (first C-1 channels) and confidence (last channel) + xyz = fmap[:, :, :, :-1] + conf = fmap[:, :, :, -1] + + if activation == "norm_exp": + d = xyz.norm(dim=-1, keepdim=True).clamp(min=1e-8) + xyz_normed = xyz / d + pts3d = xyz_normed * torch.expm1(d) + elif activation == "norm": + pts3d = xyz / xyz.norm(dim=-1, keepdim=True) + elif activation == "exp": + pts3d = torch.exp(xyz) + elif activation == "relu": + pts3d = F.relu(xyz) + elif activation == "inv_log": + pts3d = inverse_log_transform(xyz) + elif activation == "xy_inv_log": + xy, z = xyz.split([2, 1], dim=-1) + z = inverse_log_transform(z) + pts3d = torch.cat([xy * z, z], dim=-1) + elif activation == "sigmoid": + pts3d = torch.sigmoid(xyz) + elif activation == "linear": + pts3d = xyz + else: + raise ValueError(f"Unknown activation: {activation}") + + if conf_activation == "expp1": + conf_out = 1 + conf.exp() + elif conf_activation == "expp0": + conf_out = conf.exp() + elif conf_activation == "sigmoid": + conf_out = torch.sigmoid(conf) + else: + raise ValueError(f"Unknown conf_activation: {conf_activation}") + + return pts3d, conf_out + + +def inverse_log_transform(y): + """ + Apply inverse log transform: sign(y) * (exp(|y|) - 1) + + Args: + y: Input tensor + + Returns: + Transformed tensor + """ + return torch.sign(y) * (torch.expm1(torch.abs(y))) diff --git a/vggt/heads/track_head.py b/vggt/heads/track_head.py new file mode 100644 index 
0000000000000000000000000000000000000000..9ec7199bd185060989c236997f93b93f4fc77825 --- /dev/null +++ b/vggt/heads/track_head.py @@ -0,0 +1,108 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch.nn as nn +from .dpt_head import DPTHead +from .track_modules.base_track_predictor import BaseTrackerPredictor + + +class TrackHead(nn.Module): + """ + Track head that uses DPT head to process tokens and BaseTrackerPredictor for tracking. + The tracking is performed iteratively, refining predictions over multiple iterations. + """ + + def __init__( + self, + dim_in, + patch_size=14, + features=128, + iters=4, + predict_conf=True, + stride=2, + corr_levels=7, + corr_radius=4, + hidden_size=384, + ): + """ + Initialize the TrackHead module. + + Args: + dim_in (int): Input dimension of tokens from the backbone. + patch_size (int): Size of image patches used in the vision transformer. + features (int): Number of feature channels in the feature extractor output. + iters (int): Number of refinement iterations for tracking predictions. + predict_conf (bool): Whether to predict confidence scores for tracked points. + stride (int): Stride value for the tracker predictor. + corr_levels (int): Number of correlation pyramid levels + corr_radius (int): Radius for correlation computation, controlling the search area. + hidden_size (int): Size of hidden layers in the tracker network. + """ + super().__init__() + + self.patch_size = patch_size + + # Feature extractor based on DPT architecture + # Processes tokens into feature maps for tracking + self.feature_extractor = DPTHead( + dim_in=dim_in, + patch_size=patch_size, + features=features, + feature_only=True, # Only output features, no activation + down_ratio=2, # Reduces spatial dimensions by factor of 2 + pos_embed=False, + ) + + # Tracker module that predicts point trajectories + # Takes feature maps and predicts coordinates and visibility + self.tracker = BaseTrackerPredictor( + latent_dim=features, # Match the output_dim of feature extractor + predict_conf=predict_conf, + stride=stride, + corr_levels=corr_levels, + corr_radius=corr_radius, + hidden_size=hidden_size, + ) + + self.iters = iters + + def forward(self, aggregated_tokens_list, images, patch_start_idx, query_points=None, iters=None): + """ + Forward pass of the TrackHead. + + Args: + aggregated_tokens_list (list): List of aggregated tokens from the backbone. + images (torch.Tensor): Input images of shape (B, S, C, H, W) where: + B = batch size, S = sequence length. + patch_start_idx (int): Starting index for patch tokens. + query_points (torch.Tensor, optional): Initial query points to track. + If None, points are initialized by the tracker. + iters (int, optional): Number of refinement iterations. If None, uses self.iters. + + Returns: + tuple: + - coord_preds (torch.Tensor): Predicted coordinates for tracked points. + - vis_scores (torch.Tensor): Visibility scores for tracked points. + - conf_scores (torch.Tensor): Confidence scores for tracked points (if predict_conf=True). 
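+                Note: coord_preds is a list with one entry per refinement iteration; the
+                coordinates are already rescaled to the input-image resolution via the
+                tracker stride.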
+ """ + B, S, _, H, W = images.shape + + # Extract features from tokens + # feature_maps has shape (B, S, C, H//2, W//2) due to down_ratio=2 + feature_maps = self.feature_extractor(aggregated_tokens_list, images, patch_start_idx) + + # Use default iterations if not specified + if iters is None: + iters = self.iters + + # Perform tracking using the extracted features + coord_preds, vis_scores, conf_scores = self.tracker( + query_points=query_points, + fmaps=feature_maps, + iters=iters, + ) + + return coord_preds, vis_scores, conf_scores diff --git a/vggt/heads/track_modules/__init__.py b/vggt/heads/track_modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0952fcc3f57e34b3747962e9ebd6fc57aeea63fa --- /dev/null +++ b/vggt/heads/track_modules/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/vggt/heads/track_modules/base_track_predictor.py b/vggt/heads/track_modules/base_track_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..3ce8ec4b66fff236e015d1bcaf85c8237a52be7a --- /dev/null +++ b/vggt/heads/track_modules/base_track_predictor.py @@ -0,0 +1,209 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn +from einops import rearrange, repeat + + +from .blocks import EfficientUpdateFormer, CorrBlock +from .utils import sample_features4d, get_2d_embedding, get_2d_sincos_pos_embed +from .modules import Mlp + + +class BaseTrackerPredictor(nn.Module): + def __init__( + self, + stride=1, + corr_levels=5, + corr_radius=4, + latent_dim=128, + hidden_size=384, + use_spaceatt=True, + depth=6, + max_scale=518, + predict_conf=True, + ): + super(BaseTrackerPredictor, self).__init__() + """ + The base template to create a track predictor + + Modified from https://github.com/facebookresearch/co-tracker/ + and https://github.com/facebookresearch/vggsfm + """ + + self.stride = stride + self.latent_dim = latent_dim + self.corr_levels = corr_levels + self.corr_radius = corr_radius + self.hidden_size = hidden_size + self.max_scale = max_scale + self.predict_conf = predict_conf + + self.flows_emb_dim = latent_dim // 2 + + self.corr_mlp = Mlp( + in_features=self.corr_levels * (self.corr_radius * 2 + 1) ** 2, + hidden_features=self.hidden_size, + out_features=self.latent_dim, + ) + + self.transformer_dim = self.latent_dim + self.latent_dim + self.latent_dim + 4 + + self.query_ref_token = nn.Parameter(torch.randn(1, 2, self.transformer_dim)) + + space_depth = depth if use_spaceatt else 0 + time_depth = depth + + self.updateformer = EfficientUpdateFormer( + space_depth=space_depth, + time_depth=time_depth, + input_dim=self.transformer_dim, + hidden_size=self.hidden_size, + output_dim=self.latent_dim + 2, + mlp_ratio=4.0, + add_space_attn=use_spaceatt, + ) + + self.fmap_norm = nn.LayerNorm(self.latent_dim) + self.ffeat_norm = nn.GroupNorm(1, self.latent_dim) + + # A linear layer to update track feats at each iteration + self.ffeat_updater = nn.Sequential(nn.Linear(self.latent_dim, self.latent_dim), nn.GELU()) + + self.vis_predictor = nn.Sequential(nn.Linear(self.latent_dim, 1)) + + if predict_conf: + self.conf_predictor = nn.Sequential(nn.Linear(self.latent_dim, 1)) + + def 
forward(self, query_points, fmaps=None, iters=6, return_feat=False, down_ratio=1, apply_sigmoid=True): + """ + query_points: B x N x 2, the number of batches, tracks, and xy + fmaps: B x S x C x HH x WW, the number of batches, frames, and feature dimension. + note HH and WW is the size of feature maps instead of original images + """ + B, N, D = query_points.shape + B, S, C, HH, WW = fmaps.shape + + assert D == 2, "Input points must be 2D coordinates" + + # apply a layernorm to fmaps here + fmaps = self.fmap_norm(fmaps.permute(0, 1, 3, 4, 2)) + fmaps = fmaps.permute(0, 1, 4, 2, 3) + + # Scale the input query_points because we may downsample the images + # by down_ratio or self.stride + # e.g., if a 3x1024x1024 image is processed to a 128x256x256 feature map + # its query_points should be query_points/4 + if down_ratio > 1: + query_points = query_points / float(down_ratio) + + query_points = query_points / float(self.stride) + + # Init with coords as the query points + # It means the search will start from the position of query points at the reference frames + coords = query_points.clone().reshape(B, 1, N, 2).repeat(1, S, 1, 1) + + # Sample/extract the features of the query points in the query frame + query_track_feat = sample_features4d(fmaps[:, 0], coords[:, 0]) + + # init track feats by query feats + track_feats = query_track_feat.unsqueeze(1).repeat(1, S, 1, 1) # B, S, N, C + # back up the init coords + coords_backup = coords.clone() + + fcorr_fn = CorrBlock(fmaps, num_levels=self.corr_levels, radius=self.corr_radius) + + coord_preds = [] + + # Iterative Refinement + for _ in range(iters): + # Detach the gradients from the last iteration + # (in my experience, not very important for performance) + coords = coords.detach() + + fcorrs = fcorr_fn.corr_sample(track_feats, coords) + + corr_dim = fcorrs.shape[3] + fcorrs_ = fcorrs.permute(0, 2, 1, 3).reshape(B * N, S, corr_dim) + fcorrs_ = self.corr_mlp(fcorrs_) + + # Movement of current coords relative to query points + flows = (coords - coords[:, 0:1]).permute(0, 2, 1, 3).reshape(B * N, S, 2) + + flows_emb = get_2d_embedding(flows, self.flows_emb_dim, cat_coords=False) + + # (In my trials, it is also okay to just add the flows_emb instead of concat) + flows_emb = torch.cat([flows_emb, flows / self.max_scale, flows / self.max_scale], dim=-1) + + track_feats_ = track_feats.permute(0, 2, 1, 3).reshape(B * N, S, self.latent_dim) + + # Concatenate them as the input for the transformers + transformer_input = torch.cat([flows_emb, fcorrs_, track_feats_], dim=2) + + # 2D positional embed + # TODO: this can be much simplified + pos_embed = get_2d_sincos_pos_embed(self.transformer_dim, grid_size=(HH, WW)).to(query_points.device) + sampled_pos_emb = sample_features4d(pos_embed.expand(B, -1, -1, -1), coords[:, 0]) + + sampled_pos_emb = rearrange(sampled_pos_emb, "b n c -> (b n) c").unsqueeze(1) + + x = transformer_input + sampled_pos_emb + + # Add the query ref token to the track feats + query_ref_token = torch.cat( + [self.query_ref_token[:, 0:1], self.query_ref_token[:, 1:2].expand(-1, S - 1, -1)], dim=1 + ) + x = x + query_ref_token.to(x.device).to(x.dtype) + + # B, N, S, C + x = rearrange(x, "(b n) s d -> b n s d", b=B) + + # Compute the delta coordinates and delta track features + delta, _ = self.updateformer(x) + + # BN, S, C + delta = rearrange(delta, " b n s d -> (b n) s d", b=B) + delta_coords_ = delta[:, :, :2] + delta_feats_ = delta[:, :, 2:] + + track_feats_ = track_feats_.reshape(B * N * S, self.latent_dim) + delta_feats_ = 
delta_feats_.reshape(B * N * S, self.latent_dim) + + # Update the track features + track_feats_ = self.ffeat_updater(self.ffeat_norm(delta_feats_)) + track_feats_ + + track_feats = track_feats_.reshape(B, N, S, self.latent_dim).permute(0, 2, 1, 3) # BxSxNxC + + # B x S x N x 2 + coords = coords + delta_coords_.reshape(B, N, S, 2).permute(0, 2, 1, 3) + + # Force coord0 as query + # because we assume the query points should not be changed + coords[:, 0] = coords_backup[:, 0] + + # The predicted tracks are in the original image scale + if down_ratio > 1: + coord_preds.append(coords * self.stride * down_ratio) + else: + coord_preds.append(coords * self.stride) + + # B, S, N + vis_e = self.vis_predictor(track_feats.reshape(B * S * N, self.latent_dim)).reshape(B, S, N) + if apply_sigmoid: + vis_e = torch.sigmoid(vis_e) + + if self.predict_conf: + conf_e = self.conf_predictor(track_feats.reshape(B * S * N, self.latent_dim)).reshape(B, S, N) + if apply_sigmoid: + conf_e = torch.sigmoid(conf_e) + else: + conf_e = None + + if return_feat: + return coord_preds, vis_e, track_feats, query_track_feat, conf_e + else: + return coord_preds, vis_e, conf_e diff --git a/vggt/heads/track_modules/blocks.py b/vggt/heads/track_modules/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..8e7763f4fd8f515662421db192594380dbb574e5 --- /dev/null +++ b/vggt/heads/track_modules/blocks.py @@ -0,0 +1,246 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +# Modified from https://github.com/facebookresearch/co-tracker/ + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .utils import bilinear_sampler +from .modules import Mlp, AttnBlock, CrossAttnBlock, ResidualBlock + + +class EfficientUpdateFormer(nn.Module): + """ + Transformer model that updates track estimates. 
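+    Per-track temporal attention is interleaved with optional spatial attention that mixes
+    information across tracks through a small set of learned virtual-track tokens
+    (modified from CoTracker).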
+ """ + + def __init__( + self, + space_depth=6, + time_depth=6, + input_dim=320, + hidden_size=384, + num_heads=8, + output_dim=130, + mlp_ratio=4.0, + add_space_attn=True, + num_virtual_tracks=64, + ): + super().__init__() + + self.out_channels = 2 + self.num_heads = num_heads + self.hidden_size = hidden_size + self.add_space_attn = add_space_attn + + # Add input LayerNorm before linear projection + self.input_norm = nn.LayerNorm(input_dim) + self.input_transform = torch.nn.Linear(input_dim, hidden_size, bias=True) + + # Add output LayerNorm before final projection + self.output_norm = nn.LayerNorm(hidden_size) + self.flow_head = torch.nn.Linear(hidden_size, output_dim, bias=True) + self.num_virtual_tracks = num_virtual_tracks + + if self.add_space_attn: + self.virual_tracks = nn.Parameter(torch.randn(1, num_virtual_tracks, 1, hidden_size)) + else: + self.virual_tracks = None + + self.time_blocks = nn.ModuleList( + [ + AttnBlock( + hidden_size, + num_heads, + mlp_ratio=mlp_ratio, + attn_class=nn.MultiheadAttention, + ) + for _ in range(time_depth) + ] + ) + + if add_space_attn: + self.space_virtual_blocks = nn.ModuleList( + [ + AttnBlock( + hidden_size, + num_heads, + mlp_ratio=mlp_ratio, + attn_class=nn.MultiheadAttention, + ) + for _ in range(space_depth) + ] + ) + self.space_point2virtual_blocks = nn.ModuleList( + [CrossAttnBlock(hidden_size, hidden_size, num_heads, mlp_ratio=mlp_ratio) for _ in range(space_depth)] + ) + self.space_virtual2point_blocks = nn.ModuleList( + [CrossAttnBlock(hidden_size, hidden_size, num_heads, mlp_ratio=mlp_ratio) for _ in range(space_depth)] + ) + assert len(self.time_blocks) >= len(self.space_virtual2point_blocks) + self.initialize_weights() + + def initialize_weights(self): + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + torch.nn.init.trunc_normal_(self.flow_head.weight, std=0.001) + + self.apply(_basic_init) + + def forward(self, input_tensor, mask=None): + # Apply input LayerNorm + input_tensor = self.input_norm(input_tensor) + tokens = self.input_transform(input_tensor) + + init_tokens = tokens + + B, _, T, _ = tokens.shape + + if self.add_space_attn: + virtual_tokens = self.virual_tracks.repeat(B, 1, T, 1) + tokens = torch.cat([tokens, virtual_tokens], dim=1) + + _, N, _, _ = tokens.shape + + j = 0 + for i in range(len(self.time_blocks)): + time_tokens = tokens.contiguous().view(B * N, T, -1) # B N T C -> (B N) T C + + time_tokens = self.time_blocks[i](time_tokens) + + tokens = time_tokens.view(B, N, T, -1) # (B N) T C -> B N T C + if self.add_space_attn and (i % (len(self.time_blocks) // len(self.space_virtual_blocks)) == 0): + space_tokens = tokens.permute(0, 2, 1, 3).contiguous().view(B * T, N, -1) # B N T C -> (B T) N C + point_tokens = space_tokens[:, : N - self.num_virtual_tracks] + virtual_tokens = space_tokens[:, N - self.num_virtual_tracks :] + + virtual_tokens = self.space_virtual2point_blocks[j](virtual_tokens, point_tokens, mask=mask) + virtual_tokens = self.space_virtual_blocks[j](virtual_tokens) + point_tokens = self.space_point2virtual_blocks[j](point_tokens, virtual_tokens, mask=mask) + + space_tokens = torch.cat([point_tokens, virtual_tokens], dim=1) + tokens = space_tokens.view(B, T, N, -1).permute(0, 2, 1, 3) # (B T) N C -> B N T C + j += 1 + + if self.add_space_attn: + tokens = tokens[:, : N - self.num_virtual_tracks] + + tokens = tokens + init_tokens + + # Apply output LayerNorm before final 
projection + tokens = self.output_norm(tokens) + flow = self.flow_head(tokens) + + return flow, None + + +class CorrBlock: + def __init__(self, fmaps, num_levels=4, radius=4, multiple_track_feats=False, padding_mode="zeros"): + """ + Build a pyramid of feature maps from the input. + + fmaps: Tensor (B, S, C, H, W) + num_levels: number of pyramid levels (each downsampled by factor 2) + radius: search radius for sampling correlation + multiple_track_feats: if True, split the target features per pyramid level + padding_mode: passed to grid_sample / bilinear_sampler + """ + B, S, C, H, W = fmaps.shape + self.S, self.C, self.H, self.W = S, C, H, W + self.num_levels = num_levels + self.radius = radius + self.padding_mode = padding_mode + self.multiple_track_feats = multiple_track_feats + + # Build pyramid: each level is half the spatial resolution of the previous + self.fmaps_pyramid = [fmaps] # level 0 is full resolution + current_fmaps = fmaps + for i in range(num_levels - 1): + B, S, C, H, W = current_fmaps.shape + # Merge batch & sequence dimensions + current_fmaps = current_fmaps.reshape(B * S, C, H, W) + # Avg pool down by factor 2 + current_fmaps = F.avg_pool2d(current_fmaps, kernel_size=2, stride=2) + _, _, H_new, W_new = current_fmaps.shape + current_fmaps = current_fmaps.reshape(B, S, C, H_new, W_new) + self.fmaps_pyramid.append(current_fmaps) + + # Precompute a delta grid (of shape (2r+1, 2r+1, 2)) for sampling. + # This grid is added to the (scaled) coordinate centroids. + r = self.radius + dx = torch.linspace(-r, r, 2 * r + 1, device=fmaps.device, dtype=fmaps.dtype) + dy = torch.linspace(-r, r, 2 * r + 1, device=fmaps.device, dtype=fmaps.dtype) + # delta: for every (dy,dx) displacement (i.e. Δx, Δy) + self.delta = torch.stack(torch.meshgrid(dy, dx, indexing="ij"), dim=-1) # shape: (2r+1, 2r+1, 2) + + def corr_sample(self, targets, coords): + """ + Instead of storing the entire correlation pyramid, we compute each level's correlation + volume, sample it immediately, then discard it. This saves GPU memory. + + Args: + targets: Tensor (B, S, N, C) — features for the current targets. + coords: Tensor (B, S, N, 2) — coordinates at full resolution. + + Returns: + Tensor (B, S, N, L) where L = num_levels * (2*radius+1)**2 (concatenated sampled correlations) + """ + B, S, N, C = targets.shape + + # If you have multiple track features, split them per level. + if self.multiple_track_feats: + targets_split = torch.split(targets, C // self.num_levels, dim=-1) + + out_pyramid = [] + for i, fmaps in enumerate(self.fmaps_pyramid): + # Get current spatial resolution H, W for this pyramid level. + B, S, C, H, W = fmaps.shape + # Reshape feature maps for correlation computation: + # fmap2s: (B, S, C, H*W) + fmap2s = fmaps.view(B, S, C, H * W) + # Choose appropriate target features. + fmap1 = targets_split[i] if self.multiple_track_feats else targets # shape: (B, S, N, C) + + # Compute correlation directly + corrs = compute_corr_level(fmap1, fmap2s, C) + corrs = corrs.view(B, S, N, H, W) + + # Prepare sampling grid: + # Scale down the coordinates for the current level. + centroid_lvl = coords.reshape(B * S * N, 1, 1, 2) / (2**i) + # Make sure our precomputed delta grid is on the same device/dtype. 
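+            # The centroids were divided by 2**i above, so adding the fixed (2r+1) x (2r+1)
+            # delta window samples an r-pixel neighbourhood of each track at this pyramid scale.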
+ delta_lvl = self.delta.to(coords.device).to(coords.dtype) + # Now the grid for grid_sample is: + # coords_lvl = centroid_lvl + delta_lvl (broadcasted over grid) + coords_lvl = centroid_lvl + delta_lvl.view(1, 2 * self.radius + 1, 2 * self.radius + 1, 2) + + # Sample from the correlation volume using bilinear interpolation. + # We reshape corrs to (B * S * N, 1, H, W) so grid_sample acts over each target. + corrs_sampled = bilinear_sampler( + corrs.reshape(B * S * N, 1, H, W), coords_lvl, padding_mode=self.padding_mode + ) + # The sampled output is (B * S * N, 1, 2r+1, 2r+1). Flatten the last two dims. + corrs_sampled = corrs_sampled.view(B, S, N, -1) # Now shape: (B, S, N, (2r+1)^2) + out_pyramid.append(corrs_sampled) + + # Concatenate all levels along the last dimension. + out = torch.cat(out_pyramid, dim=-1).contiguous() + return out + + +def compute_corr_level(fmap1, fmap2s, C): + # fmap1: (B, S, N, C) + # fmap2s: (B, S, C, H*W) + corrs = torch.matmul(fmap1, fmap2s) # (B, S, N, H*W) + corrs = corrs.view(fmap1.shape[0], fmap1.shape[1], fmap1.shape[2], -1) # (B, S, N, H*W) + return corrs / math.sqrt(C) diff --git a/vggt/heads/track_modules/modules.py b/vggt/heads/track_modules/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..4b090ddc4a9db01c8dd3564f9053e1ca9cdde93a --- /dev/null +++ b/vggt/heads/track_modules/modules.py @@ -0,0 +1,218 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial +from typing import Callable +import collections +from torch import Tensor +from itertools import repeat + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + return tuple(x) + return tuple(repeat(x, n)) + + return parse + + +def exists(val): + return val is not None + + +def default(val, d): + return val if exists(val) else d + + +to_2tuple = _ntuple(2) + + +class ResidualBlock(nn.Module): + """ + ResidualBlock: construct a block of two conv layers with residual connections + """ + + def __init__(self, in_planes, planes, norm_fn="group", stride=1, kernel_size=3): + super(ResidualBlock, self).__init__() + + self.conv1 = nn.Conv2d( + in_planes, + planes, + kernel_size=kernel_size, + padding=1, + stride=stride, + padding_mode="zeros", + ) + self.conv2 = nn.Conv2d( + planes, + planes, + kernel_size=kernel_size, + padding=1, + padding_mode="zeros", + ) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == "group": + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == "batch": + self.norm1 = nn.BatchNorm2d(planes) + self.norm2 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm3 = nn.BatchNorm2d(planes) + + elif norm_fn == "instance": + self.norm1 = nn.InstanceNorm2d(planes) + self.norm2 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm3 = nn.InstanceNorm2d(planes) + + elif norm_fn == "none": + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + if not stride == 1: + self.norm3 = nn.Sequential() + else: + raise NotImplementedError + + if stride == 1: + self.downsample = None + else: + 
self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), + self.norm3, + ) + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x + y) + + +class Mlp(nn.Module): + """MLP as used in Vision Transformer, MLP-Mixer and related networks""" + + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + norm_layer=None, + bias=True, + drop=0.0, + use_conv=False, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear + + self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class AttnBlock(nn.Module): + def __init__( + self, + hidden_size, + num_heads, + attn_class: Callable[..., nn.Module] = nn.MultiheadAttention, + mlp_ratio=4.0, + **block_kwargs + ): + """ + Self attention block + """ + super().__init__() + + self.norm1 = nn.LayerNorm(hidden_size) + self.norm2 = nn.LayerNorm(hidden_size) + + self.attn = attn_class(embed_dim=hidden_size, num_heads=num_heads, batch_first=True, **block_kwargs) + + mlp_hidden_dim = int(hidden_size * mlp_ratio) + + self.mlp = Mlp(in_features=hidden_size, hidden_features=mlp_hidden_dim, drop=0) + + def forward(self, x, mask=None): + # Prepare the mask for PyTorch's attention (it expects a different format) + # attn_mask = mask if mask is not None else None + # Normalize before attention + x = self.norm1(x) + + # PyTorch's MultiheadAttention returns attn_output, attn_output_weights + # attn_output, _ = self.attn(x, x, x, attn_mask=attn_mask) + + attn_output, _ = self.attn(x, x, x) + + # Add & Norm + x = x + attn_output + x = x + self.mlp(self.norm2(x)) + return x + + +class CrossAttnBlock(nn.Module): + def __init__(self, hidden_size, context_dim, num_heads=1, mlp_ratio=4.0, **block_kwargs): + """ + Cross attention block + """ + super().__init__() + + self.norm1 = nn.LayerNorm(hidden_size) + self.norm_context = nn.LayerNorm(hidden_size) + self.norm2 = nn.LayerNorm(hidden_size) + + self.cross_attn = nn.MultiheadAttention( + embed_dim=hidden_size, num_heads=num_heads, batch_first=True, **block_kwargs + ) + + mlp_hidden_dim = int(hidden_size * mlp_ratio) + + self.mlp = Mlp(in_features=hidden_size, hidden_features=mlp_hidden_dim, drop=0) + + def forward(self, x, context, mask=None): + # Normalize inputs + x = self.norm1(x) + context = self.norm_context(context) + + # Apply cross attention + # Note: nn.MultiheadAttention returns attn_output, attn_output_weights + attn_output, _ = self.cross_attn(x, context, context, attn_mask=mask) + + # Add & Norm + x = x + attn_output + x = x + self.mlp(self.norm2(x)) + return x diff --git a/vggt/heads/track_modules/utils.py b/vggt/heads/track_modules/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..51d01d39cdc10388a04dab5db7cf409b31bde766 --- /dev/null +++ b/vggt/heads/track_modules/utils.py @@ -0,0 +1,226 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# Modified from https://github.com/facebookresearch/vggsfm +# and https://github.com/facebookresearch/co-tracker/tree/main + + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from typing import Optional, Tuple, Union + + +def get_2d_sincos_pos_embed(embed_dim: int, grid_size: Union[int, Tuple[int, int]], return_grid=False) -> torch.Tensor: + """ + This function initializes a grid and generates a 2D positional embedding using sine and cosine functions. + It is a wrapper of get_2d_sincos_pos_embed_from_grid. + Args: + - embed_dim: The embedding dimension. + - grid_size: The grid size. + Returns: + - pos_embed: The generated 2D positional embedding. + """ + if isinstance(grid_size, tuple): + grid_size_h, grid_size_w = grid_size + else: + grid_size_h = grid_size_w = grid_size + grid_h = torch.arange(grid_size_h, dtype=torch.float) + grid_w = torch.arange(grid_size_w, dtype=torch.float) + grid = torch.meshgrid(grid_w, grid_h, indexing="xy") + grid = torch.stack(grid, dim=0) + grid = grid.reshape([2, 1, grid_size_h, grid_size_w]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if return_grid: + return ( + pos_embed.reshape(1, grid_size_h, grid_size_w, -1).permute(0, 3, 1, 2), + grid, + ) + return pos_embed.reshape(1, grid_size_h, grid_size_w, -1).permute(0, 3, 1, 2) + + +def get_2d_sincos_pos_embed_from_grid(embed_dim: int, grid: torch.Tensor) -> torch.Tensor: + """ + This function generates a 2D positional embedding from a given grid using sine and cosine functions. + + Args: + - embed_dim: The embedding dimension. + - grid: The grid to generate the embedding from. + + Returns: + - emb: The generated 2D positional embedding. + """ + assert embed_dim % 2 == 0 + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = torch.cat([emb_h, emb_w], dim=2) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid(embed_dim: int, pos: torch.Tensor) -> torch.Tensor: + """ + This function generates a 1D positional embedding from a given grid using sine and cosine functions. + + Args: + - embed_dim: The embedding dimension. + - pos: The position to generate the embedding from. + + Returns: + - emb: The generated 1D positional embedding. + """ + assert embed_dim % 2 == 0 + omega = torch.arange(embed_dim // 2, dtype=torch.double) + omega /= embed_dim / 2.0 + omega = 1.0 / 10000**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = torch.einsum("m,d->md", pos, omega) # (M, D/2), outer product + + emb_sin = torch.sin(out) # (M, D/2) + emb_cos = torch.cos(out) # (M, D/2) + + emb = torch.cat([emb_sin, emb_cos], dim=1) # (M, D) + return emb[None].float() + + +def get_2d_embedding(xy: torch.Tensor, C: int, cat_coords: bool = True) -> torch.Tensor: + """ + This function generates a 2D positional embedding from given coordinates using sine and cosine functions. + + Args: + - xy: The coordinates to generate the embedding from. + - C: The size of the embedding. + - cat_coords: A flag to indicate whether to concatenate the original coordinates to the embedding. + + Returns: + - pe: The generated 2D positional embedding. 
+ """ + B, N, D = xy.shape + assert D == 2 + + x = xy[:, :, 0:1] + y = xy[:, :, 1:2] + div_term = (torch.arange(0, C, 2, device=xy.device, dtype=torch.float32) * (1000.0 / C)).reshape(1, 1, int(C / 2)) + + pe_x = torch.zeros(B, N, C, device=xy.device, dtype=torch.float32) + pe_y = torch.zeros(B, N, C, device=xy.device, dtype=torch.float32) + + pe_x[:, :, 0::2] = torch.sin(x * div_term) + pe_x[:, :, 1::2] = torch.cos(x * div_term) + + pe_y[:, :, 0::2] = torch.sin(y * div_term) + pe_y[:, :, 1::2] = torch.cos(y * div_term) + + pe = torch.cat([pe_x, pe_y], dim=2) # (B, N, C*3) + if cat_coords: + pe = torch.cat([xy, pe], dim=2) # (B, N, C*3+3) + return pe + + +def bilinear_sampler(input, coords, align_corners=True, padding_mode="border"): + r"""Sample a tensor using bilinear interpolation + + `bilinear_sampler(input, coords)` samples a tensor :attr:`input` at + coordinates :attr:`coords` using bilinear interpolation. It is the same + as `torch.nn.functional.grid_sample()` but with a different coordinate + convention. + + The input tensor is assumed to be of shape :math:`(B, C, H, W)`, where + :math:`B` is the batch size, :math:`C` is the number of channels, + :math:`H` is the height of the image, and :math:`W` is the width of the + image. The tensor :attr:`coords` of shape :math:`(B, H_o, W_o, 2)` is + interpreted as an array of 2D point coordinates :math:`(x_i,y_i)`. + + Alternatively, the input tensor can be of size :math:`(B, C, T, H, W)`, + in which case sample points are triplets :math:`(t_i,x_i,y_i)`. Note + that in this case the order of the components is slightly different + from `grid_sample()`, which would expect :math:`(x_i,y_i,t_i)`. + + If `align_corners` is `True`, the coordinate :math:`x` is assumed to be + in the range :math:`[0,W-1]`, with 0 corresponding to the center of the + left-most image pixel :math:`W-1` to the center of the right-most + pixel. + + If `align_corners` is `False`, the coordinate :math:`x` is assumed to + be in the range :math:`[0,W]`, with 0 corresponding to the left edge of + the left-most pixel :math:`W` to the right edge of the right-most + pixel. + + Similar conventions apply to the :math:`y` for the range + :math:`[0,H-1]` and :math:`[0,H]` and to :math:`t` for the range + :math:`[0,T-1]` and :math:`[0,T]`. + + Args: + input (Tensor): batch of input images. + coords (Tensor): batch of coordinates. + align_corners (bool, optional): Coordinate convention. Defaults to `True`. + padding_mode (str, optional): Padding mode. Defaults to `"border"`. + + Returns: + Tensor: sampled points. 
+ """ + coords = coords.detach().clone() + ############################################################ + # IMPORTANT: + coords = coords.to(input.device).to(input.dtype) + ############################################################ + + sizes = input.shape[2:] + + assert len(sizes) in [2, 3] + + if len(sizes) == 3: + # t x y -> x y t to match dimensions T H W in grid_sample + coords = coords[..., [1, 2, 0]] + + if align_corners: + scale = torch.tensor( + [2 / max(size - 1, 1) for size in reversed(sizes)], device=coords.device, dtype=coords.dtype + ) + else: + scale = torch.tensor([2 / size for size in reversed(sizes)], device=coords.device, dtype=coords.dtype) + + coords.mul_(scale) # coords = coords * scale + coords.sub_(1) # coords = coords - 1 + + return F.grid_sample(input, coords, align_corners=align_corners, padding_mode=padding_mode) + + +def sample_features4d(input, coords): + r"""Sample spatial features + + `sample_features4d(input, coords)` samples the spatial features + :attr:`input` represented by a 4D tensor :math:`(B, C, H, W)`. + + The field is sampled at coordinates :attr:`coords` using bilinear + interpolation. :attr:`coords` is assumed to be of shape :math:`(B, R, + 2)`, where each sample has the format :math:`(x_i, y_i)`. This uses the + same convention as :func:`bilinear_sampler` with `align_corners=True`. + + The output tensor has one feature per point, and has shape :math:`(B, + R, C)`. + + Args: + input (Tensor): spatial features. + coords (Tensor): points. + + Returns: + Tensor: sampled features. + """ + + B, _, _, _ = input.shape + + # B R 2 -> B R 1 2 + coords = coords.unsqueeze(2) + + # B C R 1 + feats = bilinear_sampler(input, coords) + + return feats.permute(0, 2, 1, 3).view(B, -1, feats.shape[1] * feats.shape[3]) # B C R 1 -> B R C diff --git a/vggt/heads/utils.py b/vggt/heads/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d7af1f68fa0ce0a48d11a708d53aa20aa8f78ba2 --- /dev/null +++ b/vggt/heads/utils.py @@ -0,0 +1,108 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn + + +def position_grid_to_embed(pos_grid: torch.Tensor, embed_dim: int, omega_0: float = 100) -> torch.Tensor: + """ + Convert 2D position grid (HxWx2) to sinusoidal embeddings (HxWxC) + + Args: + pos_grid: Tensor of shape (H, W, 2) containing 2D coordinates + embed_dim: Output channel dimension for embeddings + + Returns: + Tensor of shape (H, W, embed_dim) with positional embeddings + """ + H, W, grid_dim = pos_grid.shape + assert grid_dim == 2 + pos_flat = pos_grid.reshape(-1, grid_dim) # Flatten to (H*W, 2) + + # Process x and y coordinates separately + emb_x = make_sincos_pos_embed(embed_dim // 2, pos_flat[:, 0], omega_0=omega_0) # [1, H*W, D/2] + emb_y = make_sincos_pos_embed(embed_dim // 2, pos_flat[:, 1], omega_0=omega_0) # [1, H*W, D/2] + + # Combine and reshape + emb = torch.cat([emb_x, emb_y], dim=-1) # [1, H*W, D] + + return emb.view(H, W, embed_dim) # [H, W, D] + + +def make_sincos_pos_embed(embed_dim: int, pos: torch.Tensor, omega_0: float = 100) -> torch.Tensor: + """ + This function generates a 1D positional embedding from a given grid using sine and cosine functions. + + Args: + - embed_dim: The embedding dimension. + - pos: The position to generate the embedding from. + + Returns: + - emb: The generated 1D positional embedding. 
+ """ + assert embed_dim % 2 == 0 + omega = torch.arange(embed_dim // 2, dtype=torch.double, device=pos.device) + omega /= embed_dim / 2.0 + omega = 1.0 / omega_0**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = torch.einsum("m,d->md", pos, omega) # (M, D/2), outer product + + emb_sin = torch.sin(out) # (M, D/2) + emb_cos = torch.cos(out) # (M, D/2) + + emb = torch.cat([emb_sin, emb_cos], dim=1) # (M, D) + return emb.float() + + +# Inspired by https://github.com/microsoft/moge + + +def create_uv_grid( + width: int, height: int, aspect_ratio: float = None, dtype: torch.dtype = None, device: torch.device = None +) -> torch.Tensor: + """ + Create a normalized UV grid of shape (width, height, 2). + + The grid spans horizontally and vertically according to an aspect ratio, + ensuring the top-left corner is at (-x_span, -y_span) and the bottom-right + corner is at (x_span, y_span), normalized by the diagonal of the plane. + + Args: + width (int): Number of points horizontally. + height (int): Number of points vertically. + aspect_ratio (float, optional): Width-to-height ratio. Defaults to width/height. + dtype (torch.dtype, optional): Data type of the resulting tensor. + device (torch.device, optional): Device on which the tensor is created. + + Returns: + torch.Tensor: A (width, height, 2) tensor of UV coordinates. + """ + # Derive aspect ratio if not explicitly provided + if aspect_ratio is None: + aspect_ratio = float(width) / float(height) + + # Compute normalized spans for X and Y + diag_factor = (aspect_ratio**2 + 1.0) ** 0.5 + span_x = aspect_ratio / diag_factor + span_y = 1.0 / diag_factor + + # Establish the linspace boundaries + left_x = -span_x * (width - 1) / width + right_x = span_x * (width - 1) / width + top_y = -span_y * (height - 1) / height + bottom_y = span_y * (height - 1) / height + + # Generate 1D coordinates + x_coords = torch.linspace(left_x, right_x, steps=width, dtype=dtype, device=device) + y_coords = torch.linspace(top_y, bottom_y, steps=height, dtype=dtype, device=device) + + # Create 2D meshgrid (width x height) and stack into UV + uu, vv = torch.meshgrid(x_coords, y_coords, indexing="xy") + uv_grid = torch.stack((uu, vv), dim=-1) + + return uv_grid diff --git a/vggt/layers/__init__.py b/vggt/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8120f4bc83066cb3f825ce32daa3b437f88486f1 --- /dev/null +++ b/vggt/layers/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from .mlp import Mlp +from .patch_embed import PatchEmbed +from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused +from .block import NestedTensorBlock +from .attention import MemEffAttention diff --git a/vggt/layers/attention.py b/vggt/layers/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..ab3089ce0c7493342ef0cf373dfe74a1df2b9563 --- /dev/null +++ b/vggt/layers/attention.py @@ -0,0 +1,98 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the Apache License, Version 2.0 +# found in the LICENSE file in the root directory of this source tree. 
+ +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py + +import logging +import os +import warnings + +from torch import Tensor +from torch import nn +import torch.nn.functional as F + +XFORMERS_AVAILABLE = False + + +class Attention(nn.Module): + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = True, + proj_bias: bool = True, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + norm_layer: nn.Module = nn.LayerNorm, + qk_norm: bool = False, + fused_attn: bool = True, # use F.scaled_dot_product_attention or not + rope=None, + ) -> None: + super().__init__() + assert dim % num_heads == 0, "dim should be divisible by num_heads" + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim**-0.5 + self.fused_attn = fused_attn + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim, bias=proj_bias) + self.proj_drop = nn.Dropout(proj_drop) + self.rope = rope + + def forward(self, x: Tensor, pos=None) -> Tensor: + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + q, k = self.q_norm(q), self.k_norm(k) + + if self.rope is not None: + q = self.rope(q, pos) + k = self.rope(k, pos) + + if self.fused_attn: + x = F.scaled_dot_product_attention( + q, + k, + v, + dropout_p=self.attn_drop.p if self.training else 0.0, + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class MemEffAttention(Attention): + def forward(self, x: Tensor, attn_bias=None, pos=None) -> Tensor: + assert pos is None + if not XFORMERS_AVAILABLE: + if attn_bias is not None: + raise AssertionError("xFormers is required for using nested tensors") + return super().forward(x) + + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads) + + q, k, v = unbind(qkv, 2) + + x = memory_efficient_attention(q, k, v, attn_bias=attn_bias) + x = x.reshape([B, N, C]) + + x = self.proj(x) + x = self.proj_drop(x) + return x diff --git a/vggt/layers/block.py b/vggt/layers/block.py new file mode 100644 index 0000000000000000000000000000000000000000..5f89e4da7121effca97151d1d8429586e422346e --- /dev/null +++ b/vggt/layers/block.py @@ -0,0 +1,259 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the Apache License, Version 2.0 +# found in the LICENSE file in the root directory of this source tree. 
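Aside on `vggt/layers/attention.py` above: a minimal usage sketch with arbitrary sizes; `dim` must be divisible by `num_heads`, and with `fused_attn=True` the block routes through `F.scaled_dot_product_attention`.

```python
# Minimal usage sketch (not part of the diff); the sizes are assumptions.
import torch
from vggt.layers.attention import Attention

attn = Attention(dim=64, num_heads=8, fused_attn=True)
x = torch.randn(2, 16, 64)   # (batch, tokens, dim)
out = attn(x)                # same shape as the input: (2, 16, 64)
```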
+ +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py + +import logging +import os +from typing import Callable, List, Any, Tuple, Dict +import warnings + +import torch +from torch import nn, Tensor + +from .attention import Attention +from .drop_path import DropPath +from .layer_scale import LayerScale +from .mlp import Mlp + + +XFORMERS_AVAILABLE = False + + +class Block(nn.Module): + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + proj_bias: bool = True, + ffn_bias: bool = True, + drop: float = 0.0, + attn_drop: float = 0.0, + init_values=None, + drop_path: float = 0.0, + act_layer: Callable[..., nn.Module] = nn.GELU, + norm_layer: Callable[..., nn.Module] = nn.LayerNorm, + attn_class: Callable[..., nn.Module] = Attention, + ffn_layer: Callable[..., nn.Module] = Mlp, + qk_norm: bool = False, + fused_attn: bool = True, # use F.scaled_dot_product_attention or not + rope=None, + ) -> None: + super().__init__() + + self.norm1 = norm_layer(dim) + + self.attn = attn_class( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + proj_bias=proj_bias, + attn_drop=attn_drop, + proj_drop=drop, + qk_norm=qk_norm, + fused_attn=fused_attn, + rope=rope, + ) + + self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = ffn_layer( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + bias=ffn_bias, + ) + self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.sample_drop_ratio = drop_path + + def forward(self, x: Tensor, pos=None) -> Tensor: + def attn_residual_func(x: Tensor, pos=None) -> Tensor: + return self.ls1(self.attn(self.norm1(x), pos=pos)) + + def ffn_residual_func(x: Tensor) -> Tensor: + return self.ls2(self.mlp(self.norm2(x))) + + if self.training and self.sample_drop_ratio > 0.1: + # the overhead is compensated only for a drop path rate larger than 0.1 + x = drop_add_residual_stochastic_depth( + x, + pos=pos, + residual_func=attn_residual_func, + sample_drop_ratio=self.sample_drop_ratio, + ) + x = drop_add_residual_stochastic_depth( + x, + residual_func=ffn_residual_func, + sample_drop_ratio=self.sample_drop_ratio, + ) + elif self.training and self.sample_drop_ratio > 0.0: + x = x + self.drop_path1(attn_residual_func(x, pos=pos)) + x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2 + else: + x = x + attn_residual_func(x, pos=pos) + x = x + ffn_residual_func(x) + return x + + +def drop_add_residual_stochastic_depth( + x: Tensor, + residual_func: Callable[[Tensor], Tensor], + sample_drop_ratio: float = 0.0, + pos=None, +) -> Tensor: + # 1) extract subset using permutation + b, n, d = x.shape + sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1) + brange = (torch.randperm(b, device=x.device))[:sample_subset_size] + x_subset = x[brange] + + # 2) apply residual_func to get residual + if pos is not None: + # if necessary, apply rope to the subset + pos = pos[brange] + residual = residual_func(x_subset, pos=pos) + else: + residual = residual_func(x_subset) + + x_flat = x.flatten(1) + residual = residual.flatten(1) + + residual_scale_factor = b / 
sample_subset_size + + # 3) add the residual + x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor) + return x_plus_residual.view_as(x) + + +def get_branges_scales(x, sample_drop_ratio=0.0): + b, n, d = x.shape + sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1) + brange = (torch.randperm(b, device=x.device))[:sample_subset_size] + residual_scale_factor = b / sample_subset_size + return brange, residual_scale_factor + + +def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None): + if scaling_vector is None: + x_flat = x.flatten(1) + residual = residual.flatten(1) + x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor) + else: + x_plus_residual = scaled_index_add( + x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor + ) + return x_plus_residual + + +attn_bias_cache: Dict[Tuple, Any] = {} + + +def get_attn_bias_and_cat(x_list, branges=None): + """ + this will perform the index select, cat the tensors, and provide the attn_bias from cache + """ + batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list] + all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list)) + if all_shapes not in attn_bias_cache.keys(): + seqlens = [] + for b, x in zip(batch_sizes, x_list): + for _ in range(b): + seqlens.append(x.shape[1]) + attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens) + attn_bias._batch_sizes = batch_sizes + attn_bias_cache[all_shapes] = attn_bias + + if branges is not None: + cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1]) + else: + tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list) + cat_tensors = torch.cat(tensors_bs1, dim=1) + + return attn_bias_cache[all_shapes], cat_tensors + + +def drop_add_residual_stochastic_depth_list( + x_list: List[Tensor], + residual_func: Callable[[Tensor, Any], Tensor], + sample_drop_ratio: float = 0.0, + scaling_vector=None, +) -> Tensor: + # 1) generate random set of indices for dropping samples in the batch + branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list] + branges = [s[0] for s in branges_scales] + residual_scale_factors = [s[1] for s in branges_scales] + + # 2) get attention bias and index+concat the tensors + attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges) + + # 3) apply residual_func to get residual, and split the result + residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore + + outputs = [] + for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors): + outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x)) + return outputs + + +class NestedTensorBlock(Block): + def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]: + """ + x_list contains a list of tensors to nest together and run + """ + assert isinstance(self.attn, MemEffAttention) + + if self.training and self.sample_drop_ratio > 0.0: + + def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor: + return self.attn(self.norm1(x), attn_bias=attn_bias) + + def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor: + return self.mlp(self.norm2(x)) + + x_list = drop_add_residual_stochastic_depth_list( + x_list, + residual_func=attn_residual_func, + 
sample_drop_ratio=self.sample_drop_ratio, + scaling_vector=self.ls1.gamma if isinstance(self.ls1, LayerScale) else None, + ) + x_list = drop_add_residual_stochastic_depth_list( + x_list, + residual_func=ffn_residual_func, + sample_drop_ratio=self.sample_drop_ratio, + scaling_vector=self.ls2.gamma if isinstance(self.ls1, LayerScale) else None, + ) + return x_list + else: + + def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor: + return self.ls1(self.attn(self.norm1(x), attn_bias=attn_bias)) + + def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor: + return self.ls2(self.mlp(self.norm2(x))) + + attn_bias, x = get_attn_bias_and_cat(x_list) + x = x + attn_residual_func(x, attn_bias=attn_bias) + x = x + ffn_residual_func(x) + return attn_bias.split(x) + + def forward(self, x_or_x_list): + if isinstance(x_or_x_list, Tensor): + return super().forward(x_or_x_list) + elif isinstance(x_or_x_list, list): + if not XFORMERS_AVAILABLE: + raise AssertionError("xFormers is required for using nested tensors") + return self.forward_nested(x_or_x_list) + else: + raise AssertionError diff --git a/vggt/layers/drop_path.py b/vggt/layers/drop_path.py new file mode 100644 index 0000000000000000000000000000000000000000..1d640e0b969b8dcba96260243473700b4e5b24b5 --- /dev/null +++ b/vggt/layers/drop_path.py @@ -0,0 +1,34 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the Apache License, Version 2.0 +# found in the LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/drop.py + + +from torch import nn + + +def drop_path(x, drop_prob: float = 0.0, training: bool = False): + if drop_prob == 0.0 or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0: + random_tensor.div_(keep_prob) + output = x * random_tensor + return output + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) diff --git a/vggt/layers/layer_scale.py b/vggt/layers/layer_scale.py new file mode 100644 index 0000000000000000000000000000000000000000..51df0d7ce61f2b41fa9e6369f52391dd7fe7d386 --- /dev/null +++ b/vggt/layers/layer_scale.py @@ -0,0 +1,27 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the Apache License, Version 2.0 +# found in the LICENSE file in the root directory of this source tree. 
+ +# Modified from: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L103-L110 + +from typing import Union + +import torch +from torch import Tensor +from torch import nn + + +class LayerScale(nn.Module): + def __init__( + self, + dim: int, + init_values: Union[float, Tensor] = 1e-5, + inplace: bool = False, + ) -> None: + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x: Tensor) -> Tensor: + return x.mul_(self.gamma) if self.inplace else x * self.gamma diff --git a/vggt/layers/mlp.py b/vggt/layers/mlp.py new file mode 100644 index 0000000000000000000000000000000000000000..bbf9432aae9258612caeae910a7bde17999e328e --- /dev/null +++ b/vggt/layers/mlp.py @@ -0,0 +1,40 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the Apache License, Version 2.0 +# found in the LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/mlp.py + + +from typing import Callable, Optional + +from torch import Tensor, nn + + +class Mlp(nn.Module): + def __init__( + self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + act_layer: Callable[..., nn.Module] = nn.GELU, + drop: float = 0.0, + bias: bool = True, + ) -> None: + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias) + self.drop = nn.Dropout(drop) + + def forward(self, x: Tensor) -> Tensor: + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x diff --git a/vggt/layers/patch_embed.py b/vggt/layers/patch_embed.py new file mode 100644 index 0000000000000000000000000000000000000000..8b7c0804784a42cf80c0297d110dcc68cc85b339 --- /dev/null +++ b/vggt/layers/patch_embed.py @@ -0,0 +1,88 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the Apache License, Version 2.0 +# found in the LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py + +from typing import Callable, Optional, Tuple, Union + +from torch import Tensor +import torch.nn as nn + + +def make_2tuple(x): + if isinstance(x, tuple): + assert len(x) == 2 + return x + + assert isinstance(x, int) + return (x, x) + + +class PatchEmbed(nn.Module): + """ + 2D image to patch embedding: (B,C,H,W) -> (B,N,D) + + Args: + img_size: Image size. + patch_size: Patch token size. + in_chans: Number of input image channels. + embed_dim: Number of linear projection output channels. + norm_layer: Normalization layer. 
+ """ + + def __init__( + self, + img_size: Union[int, Tuple[int, int]] = 224, + patch_size: Union[int, Tuple[int, int]] = 16, + in_chans: int = 3, + embed_dim: int = 768, + norm_layer: Optional[Callable] = None, + flatten_embedding: bool = True, + ) -> None: + super().__init__() + + image_HW = make_2tuple(img_size) + patch_HW = make_2tuple(patch_size) + patch_grid_size = ( + image_HW[0] // patch_HW[0], + image_HW[1] // patch_HW[1], + ) + + self.img_size = image_HW + self.patch_size = patch_HW + self.patches_resolution = patch_grid_size + self.num_patches = patch_grid_size[0] * patch_grid_size[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.flatten_embedding = flatten_embedding + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x: Tensor) -> Tensor: + _, _, H, W = x.shape + patch_H, patch_W = self.patch_size + + assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}" + assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width: {patch_W}" + + x = self.proj(x) # B C H W + H, W = x.size(2), x.size(3) + x = x.flatten(2).transpose(1, 2) # B HW C + x = self.norm(x) + if not self.flatten_embedding: + x = x.reshape(-1, H, W, self.embed_dim) # B H W C + return x + + def flops(self) -> float: + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops diff --git a/vggt/layers/rope.py b/vggt/layers/rope.py new file mode 100644 index 0000000000000000000000000000000000000000..4d5d33304e55dbd05687bd86752a47a80e5f82df --- /dev/null +++ b/vggt/layers/rope.py @@ -0,0 +1,188 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the Apache License, Version 2.0 +# found in the LICENSE file in the root directory of this source tree. + + +# Implementation of 2D Rotary Position Embeddings (RoPE). + +# This module provides a clean implementation of 2D Rotary Position Embeddings, +# which extends the original RoPE concept to handle 2D spatial positions. + +# Inspired by: +# https://github.com/meta-llama/codellama/blob/main/llama/model.py +# https://github.com/naver-ai/rope-vit + + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Dict, Tuple + + +class PositionGetter: + """Generates and caches 2D spatial positions for patches in a grid. + + This class efficiently manages the generation of spatial coordinates for patches + in a 2D grid, caching results to avoid redundant computations. + + Attributes: + position_cache: Dictionary storing precomputed position tensors for different + grid dimensions. + """ + + def __init__(self): + """Initializes the position generator with an empty cache.""" + self.position_cache: Dict[Tuple[int, int], torch.Tensor] = {} + + def __call__(self, batch_size: int, height: int, width: int, device: torch.device) -> torch.Tensor: + """Generates spatial positions for a batch of patches. + + Args: + batch_size: Number of samples in the batch. + height: Height of the grid in patches. + width: Width of the grid in patches. + device: Target device for the position tensor. + + Returns: + Tensor of shape (batch_size, height*width, 2) containing y,x coordinates + for each position in the grid, repeated for each batch item. 
+ """ + if (height, width) not in self.position_cache: + y_coords = torch.arange(height, device=device) + x_coords = torch.arange(width, device=device) + positions = torch.cartesian_prod(y_coords, x_coords) + self.position_cache[height, width] = positions + + cached_positions = self.position_cache[height, width] + return cached_positions.view(1, height * width, 2).expand(batch_size, -1, -1).clone() + + +class RotaryPositionEmbedding2D(nn.Module): + """2D Rotary Position Embedding implementation. + + This module applies rotary position embeddings to input tokens based on their + 2D spatial positions. It handles the position-dependent rotation of features + separately for vertical and horizontal dimensions. + + Args: + frequency: Base frequency for the position embeddings. Default: 100.0 + scaling_factor: Scaling factor for frequency computation. Default: 1.0 + + Attributes: + base_frequency: Base frequency for computing position embeddings. + scaling_factor: Factor to scale the computed frequencies. + frequency_cache: Cache for storing precomputed frequency components. + """ + + def __init__(self, frequency: float = 100.0, scaling_factor: float = 1.0): + """Initializes the 2D RoPE module.""" + super().__init__() + self.base_frequency = frequency + self.scaling_factor = scaling_factor + self.frequency_cache: Dict[Tuple, Tuple[torch.Tensor, torch.Tensor]] = {} + + def _compute_frequency_components( + self, dim: int, seq_len: int, device: torch.device, dtype: torch.dtype + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Computes frequency components for rotary embeddings. + + Args: + dim: Feature dimension (must be even). + seq_len: Maximum sequence length. + device: Target device for computations. + dtype: Data type for the computed tensors. + + Returns: + Tuple of (cosine, sine) tensors for frequency components. + """ + cache_key = (dim, seq_len, device, dtype) + if cache_key not in self.frequency_cache: + # Compute frequency bands + exponents = torch.arange(0, dim, 2, device=device).float() / dim + inv_freq = 1.0 / (self.base_frequency**exponents) + + # Generate position-dependent frequencies + positions = torch.arange(seq_len, device=device, dtype=inv_freq.dtype) + angles = torch.einsum("i,j->ij", positions, inv_freq) + + # Compute and cache frequency components + angles = angles.to(dtype) + angles = torch.cat((angles, angles), dim=-1) + cos_components = angles.cos().to(dtype) + sin_components = angles.sin().to(dtype) + self.frequency_cache[cache_key] = (cos_components, sin_components) + + return self.frequency_cache[cache_key] + + @staticmethod + def _rotate_features(x: torch.Tensor) -> torch.Tensor: + """Performs feature rotation by splitting and recombining feature dimensions. + + Args: + x: Input tensor to rotate. + + Returns: + Rotated feature tensor. + """ + feature_dim = x.shape[-1] + x1, x2 = x[..., : feature_dim // 2], x[..., feature_dim // 2 :] + return torch.cat((-x2, x1), dim=-1) + + def _apply_1d_rope( + self, tokens: torch.Tensor, positions: torch.Tensor, cos_comp: torch.Tensor, sin_comp: torch.Tensor + ) -> torch.Tensor: + """Applies 1D rotary position embeddings along one dimension. + + Args: + tokens: Input token features. + positions: Position indices. + cos_comp: Cosine components for rotation. + sin_comp: Sine components for rotation. + + Returns: + Tokens with applied rotary position embeddings. 
+ """ + # Embed positions with frequency components + cos = F.embedding(positions, cos_comp)[:, None, :, :] + sin = F.embedding(positions, sin_comp)[:, None, :, :] + + # Apply rotation + return (tokens * cos) + (self._rotate_features(tokens) * sin) + + def forward(self, tokens: torch.Tensor, positions: torch.Tensor) -> torch.Tensor: + """Applies 2D rotary position embeddings to input tokens. + + Args: + tokens: Input tensor of shape (batch_size, n_heads, n_tokens, dim). + The feature dimension (dim) must be divisible by 4. + positions: Position tensor of shape (batch_size, n_tokens, 2) containing + the y and x coordinates for each token. + + Returns: + Tensor of same shape as input with applied 2D rotary position embeddings. + + Raises: + AssertionError: If input dimensions are invalid or positions are malformed. + """ + # Validate inputs + assert tokens.size(-1) % 2 == 0, "Feature dimension must be even" + assert positions.ndim == 3 and positions.shape[-1] == 2, "Positions must have shape (batch_size, n_tokens, 2)" + + # Compute feature dimension for each spatial direction + feature_dim = tokens.size(-1) // 2 + + # Get frequency components + max_position = int(positions.max()) + 1 + cos_comp, sin_comp = self._compute_frequency_components(feature_dim, max_position, tokens.device, tokens.dtype) + + # Split features for vertical and horizontal processing + vertical_features, horizontal_features = tokens.chunk(2, dim=-1) + + # Apply RoPE separately for each dimension + vertical_features = self._apply_1d_rope(vertical_features, positions[..., 0], cos_comp, sin_comp) + horizontal_features = self._apply_1d_rope(horizontal_features, positions[..., 1], cos_comp, sin_comp) + + # Combine processed features + return torch.cat((vertical_features, horizontal_features), dim=-1) diff --git a/vggt/layers/swiglu_ffn.py b/vggt/layers/swiglu_ffn.py new file mode 100644 index 0000000000000000000000000000000000000000..54fe8e90b7bedf6fbdbf09c6215844e3cc63f857 --- /dev/null +++ b/vggt/layers/swiglu_ffn.py @@ -0,0 +1,72 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the Apache License, Version 2.0 +# found in the LICENSE file in the root directory of this source tree. 
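The 2D RoPE module above splits each head's features in half, rotating one half according to the y coordinate and the other according to the x coordinate. A minimal usage sketch, assuming the classes are importable as vggt.layers.rope.RotaryPositionEmbedding2D and PositionGetter (the same pairing the Aggregator uses later in this diff); the grid and head sizes are illustrative:

import torch
from vggt.layers.rope import RotaryPositionEmbedding2D, PositionGetter

rope = RotaryPositionEmbedding2D(frequency=100.0)
position_getter = PositionGetter()

B, n_heads, H, W, head_dim = 2, 4, 7, 7, 32      # head_dim must be divisible by 4
tokens = torch.randn(B, n_heads, H * W, head_dim)
positions = position_getter(B, H, W, device=tokens.device)  # (B, H*W, 2) of (y, x) indices

rotated = rope(tokens, positions)
assert rotated.shape == tokens.shape
# The rotation only mixes pairs of feature channels, so per-token feature norms are preserved.
print(torch.allclose(tokens.norm(dim=-1), rotated.norm(dim=-1), atol=1e-4))

Because the rotation is position-dependent, dot products between rotated queries and keys depend on the tokens' relative (y, x) offsets, which is what gives the attention its translation awareness.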
+ +import os +from typing import Callable, Optional +import warnings + +from torch import Tensor, nn +import torch.nn.functional as F + + +class SwiGLUFFN(nn.Module): + def __init__( + self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + act_layer: Callable[..., nn.Module] = None, + drop: float = 0.0, + bias: bool = True, + ) -> None: + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias) + self.w3 = nn.Linear(hidden_features, out_features, bias=bias) + + def forward(self, x: Tensor) -> Tensor: + x12 = self.w12(x) + x1, x2 = x12.chunk(2, dim=-1) + hidden = F.silu(x1) * x2 + return self.w3(hidden) + + +XFORMERS_ENABLED = os.environ.get("XFORMERS_DISABLED") is None +# try: +# if XFORMERS_ENABLED: +# from xformers.ops import SwiGLU + +# XFORMERS_AVAILABLE = True +# warnings.warn("xFormers is available (SwiGLU)") +# else: +# warnings.warn("xFormers is disabled (SwiGLU)") +# raise ImportError +# except ImportError: +SwiGLU = SwiGLUFFN +XFORMERS_AVAILABLE = False + +# warnings.warn("xFormers is not available (SwiGLU)") + + +class SwiGLUFFNFused(SwiGLU): + def __init__( + self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + act_layer: Callable[..., nn.Module] = None, + drop: float = 0.0, + bias: bool = True, + ) -> None: + out_features = out_features or in_features + hidden_features = hidden_features or in_features + hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 + super().__init__( + in_features=in_features, + hidden_features=hidden_features, + out_features=out_features, + bias=bias, + ) diff --git a/vggt/layers/vision_transformer.py b/vggt/layers/vision_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..120cbe6c26650d212e50aefc497669abdc937467 --- /dev/null +++ b/vggt/layers/vision_transformer.py @@ -0,0 +1,407 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the Apache License, Version 2.0 +# found in the LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/main/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py + +from functools import partial +import math +import logging +from typing import Sequence, Tuple, Union, Callable + +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint +from torch.nn.init import trunc_normal_ +from . 
import Mlp, PatchEmbed, SwiGLUFFNFused, MemEffAttention, NestedTensorBlock as Block + +logger = logging.getLogger("dinov2") + + +def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module: + if not depth_first and include_root: + fn(module=module, name=name) + for child_name, child_module in module.named_children(): + child_name = ".".join((name, child_name)) if name else child_name + named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + fn(module=module, name=name) + return module + + +class BlockChunk(nn.ModuleList): + def forward(self, x): + for b in self: + x = b(x) + return x + + +class DinoVisionTransformer(nn.Module): + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=True, + ffn_bias=True, + proj_bias=True, + drop_path_rate=0.0, + drop_path_uniform=False, + init_values=None, # for layerscale: None or 0 => no layerscale + embed_layer=PatchEmbed, + act_layer=nn.GELU, + block_fn=Block, + ffn_layer="mlp", + block_chunks=1, + num_register_tokens=0, + interpolate_antialias=False, + interpolate_offset=0.1, + qk_norm=False, + ): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + proj_bias (bool): enable bias for proj in attn if True + ffn_bias (bool): enable bias for ffn if True + drop_path_rate (float): stochastic depth rate + drop_path_uniform (bool): apply uniform drop rate across blocks + weight_init (str): weight init scheme + init_values (float): layer-scale init values + embed_layer (nn.Module): patch embedding layer + act_layer (nn.Module): MLP activation layer + block_fn (nn.Module): transformer block class + ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity" + block_chunks: (int) split block sequence into block_chunks units for FSDP wrap + num_register_tokens: (int) number of extra cls tokens (so-called "registers") + interpolate_antialias: (str) flag to apply anti-aliasing when interpolating positional embeddings + interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings + """ + super().__init__() + norm_layer = partial(nn.LayerNorm, eps=1e-6) + + # tricky but makes it work + self.use_checkpoint = False + # + + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.num_tokens = 1 + self.n_blocks = depth + self.num_heads = num_heads + self.patch_size = patch_size + self.num_register_tokens = num_register_tokens + self.interpolate_antialias = interpolate_antialias + self.interpolate_offset = interpolate_offset + + self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) + assert num_register_tokens >= 0 + self.register_tokens = ( + nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None + ) + + if drop_path_uniform is True: + dpr = [drop_path_rate] * depth + else: 
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + + if ffn_layer == "mlp": + logger.info("using MLP layer as FFN") + ffn_layer = Mlp + elif ffn_layer == "swiglufused" or ffn_layer == "swiglu": + logger.info("using SwiGLU layer as FFN") + ffn_layer = SwiGLUFFNFused + elif ffn_layer == "identity": + logger.info("using Identity layer as FFN") + + def f(*args, **kwargs): + return nn.Identity() + + ffn_layer = f + else: + raise NotImplementedError + + blocks_list = [ + block_fn( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_bias=proj_bias, + ffn_bias=ffn_bias, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer, + ffn_layer=ffn_layer, + init_values=init_values, + qk_norm=qk_norm, + ) + for i in range(depth) + ] + if block_chunks > 0: + self.chunked_blocks = True + chunked_blocks = [] + chunksize = depth // block_chunks + for i in range(0, depth, chunksize): + # this is to keep the block index consistent if we chunk the block list + chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize]) + self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks]) + else: + self.chunked_blocks = False + self.blocks = nn.ModuleList(blocks_list) + + self.norm = norm_layer(embed_dim) + self.head = nn.Identity() + + self.mask_token = nn.Parameter(torch.zeros(1, embed_dim)) + + self.init_weights() + + def init_weights(self): + trunc_normal_(self.pos_embed, std=0.02) + nn.init.normal_(self.cls_token, std=1e-6) + if self.register_tokens is not None: + nn.init.normal_(self.register_tokens, std=1e-6) + named_apply(init_weights_vit_timm, self) + + def interpolate_pos_encoding(self, x, w, h): + previous_dtype = x.dtype + npatch = x.shape[1] - 1 + N = self.pos_embed.shape[1] - 1 + if npatch == N and w == h: + return self.pos_embed + pos_embed = self.pos_embed.float() + class_pos_embed = pos_embed[:, 0] + patch_pos_embed = pos_embed[:, 1:] + dim = x.shape[-1] + w0 = w // self.patch_size + h0 = h // self.patch_size + M = int(math.sqrt(N)) # Recover the number of patches in each dimension + assert N == M * M + kwargs = {} + if self.interpolate_offset: + # Historical kludge: add a small number to avoid floating point error in the interpolation, see https://github.com/facebookresearch/dino/issues/8 + # Note: still needed for backward-compatibility, the underlying operators are using both output size and scale factors + sx = float(w0 + self.interpolate_offset) / M + sy = float(h0 + self.interpolate_offset) / M + kwargs["scale_factor"] = (sx, sy) + else: + # Simply specify an output size instead of a scale factor + kwargs["size"] = (w0, h0) + patch_pos_embed = nn.functional.interpolate( + patch_pos_embed.reshape(1, M, M, dim).permute(0, 3, 1, 2), + mode="bicubic", + antialias=self.interpolate_antialias, + **kwargs, + ) + assert (w0, h0) == patch_pos_embed.shape[-2:] + patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype) + + def prepare_tokens_with_masks(self, x, masks=None): + B, nc, w, h = x.shape + x = self.patch_embed(x) + if masks is not None: + x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x) + + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + x = x + self.interpolate_pos_encoding(x, w, h) + + if self.register_tokens is not None: + x = torch.cat( + ( + x[:, :1], + self.register_tokens.expand(x.shape[0], -1, -1), + x[:, 
1:], + ), + dim=1, + ) + + return x + + def forward_features_list(self, x_list, masks_list): + x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)] + + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint(blk, x, use_reentrant=self.use_reentrant) + else: + x = blk(x) + + all_x = x + output = [] + for x, masks in zip(all_x, masks_list): + x_norm = self.norm(x) + output.append( + { + "x_norm_clstoken": x_norm[:, 0], + "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1], + "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :], + "x_prenorm": x, + "masks": masks, + } + ) + return output + + def forward_features(self, x, masks=None): + if isinstance(x, list): + return self.forward_features_list(x, masks) + + x = self.prepare_tokens_with_masks(x, masks) + + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint(blk, x, use_reentrant=self.use_reentrant) + else: + x = blk(x) + + x_norm = self.norm(x) + return { + "x_norm_clstoken": x_norm[:, 0], + "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1], + "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :], + "x_prenorm": x, + "masks": masks, + } + + def _get_intermediate_layers_not_chunked(self, x, n=1): + x = self.prepare_tokens_with_masks(x) + # If n is an int, take the n last blocks. If it's a list, take them + output, total_block_len = [], len(self.blocks) + blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n + for i, blk in enumerate(self.blocks): + x = blk(x) + if i in blocks_to_take: + output.append(x) + assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found" + return output + + def _get_intermediate_layers_chunked(self, x, n=1): + x = self.prepare_tokens_with_masks(x) + output, i, total_block_len = [], 0, len(self.blocks[-1]) + # If n is an int, take the n last blocks. 
If it's a list, take them + blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n + for block_chunk in self.blocks: + for blk in block_chunk[i:]: # Passing the nn.Identity() + x = blk(x) + if i in blocks_to_take: + output.append(x) + i += 1 + assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found" + return output + + def get_intermediate_layers( + self, + x: torch.Tensor, + n: Union[int, Sequence] = 1, # Layers or n last layers to take + reshape: bool = False, + return_class_token: bool = False, + norm=True, + ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]: + if self.chunked_blocks: + outputs = self._get_intermediate_layers_chunked(x, n) + else: + outputs = self._get_intermediate_layers_not_chunked(x, n) + if norm: + outputs = [self.norm(out) for out in outputs] + class_tokens = [out[:, 0] for out in outputs] + outputs = [out[:, 1 + self.num_register_tokens :] for out in outputs] + if reshape: + B, _, w, h = x.shape + outputs = [ + out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous() + for out in outputs + ] + if return_class_token: + return tuple(zip(outputs, class_tokens)) + return tuple(outputs) + + def forward(self, *args, is_training=True, **kwargs): + ret = self.forward_features(*args, **kwargs) + if is_training: + return ret + else: + return self.head(ret["x_norm_clstoken"]) + + +def init_weights_vit_timm(module: nn.Module, name: str = ""): + """ViT weight initialization, original timm impl (for reproducibility)""" + if isinstance(module, nn.Linear): + trunc_normal_(module.weight, std=0.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + + +def vit_small(patch_size=16, num_register_tokens=0, **kwargs): + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model + + +def vit_base(patch_size=16, num_register_tokens=0, **kwargs): + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model + + +def vit_large(patch_size=16, num_register_tokens=0, **kwargs): + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model + + +def vit_giant2(patch_size=16, num_register_tokens=0, **kwargs): + """ + Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64 + """ + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=1536, + depth=40, + num_heads=24, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model diff --git a/vggt/models/aggregator.py b/vggt/models/aggregator.py new file mode 100644 index 0000000000000000000000000000000000000000..393f9920a24b05eca3eb82f7db8bd024f9c1636e --- /dev/null +++ b/vggt/models/aggregator.py @@ -0,0 +1,331 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
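The factory functions above (vit_small, vit_base, vit_large, vit_giant2) build DINOv2-style backbones that return token dictionaries rather than classification logits. A small forward-pass sketch, assuming the module is importable as vggt.layers.vision_transformer and using an input size that matches the 224/16 defaults so no positional-embedding interpolation is triggered:

import torch
from vggt.layers.vision_transformer import vit_small

model = vit_small(img_size=224, patch_size=16, num_register_tokens=4, block_chunks=0).eval()
images = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    out = model(images)                      # is_training defaults to True, so a dict is returned

print(out["x_norm_clstoken"].shape)          # torch.Size([1, 384])
print(out["x_norm_regtokens"].shape)         # torch.Size([1, 4, 384])
print(out["x_norm_patchtokens"].shape)       # torch.Size([1, 196, 384])

The Aggregator below instantiates vit_large in this way as its patch_embed and keeps only x_norm_patchtokens, attaching its own camera and register tokens instead.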
+ +import logging +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Optional, Tuple, Union, List, Dict, Any + +from vggt.layers import PatchEmbed +from vggt.layers.block import Block +from vggt.layers.rope import RotaryPositionEmbedding2D, PositionGetter +from vggt.layers.vision_transformer import vit_small, vit_base, vit_large, vit_giant2 + +logger = logging.getLogger(__name__) + +_RESNET_MEAN = [0.485, 0.456, 0.406] +_RESNET_STD = [0.229, 0.224, 0.225] + + +class Aggregator(nn.Module): + """ + The Aggregator applies alternating-attention over input frames, + as described in VGGT: Visual Geometry Grounded Transformer. + + + Args: + img_size (int): Image size in pixels. + patch_size (int): Size of each patch for PatchEmbed. + embed_dim (int): Dimension of the token embeddings. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + mlp_ratio (float): Ratio of MLP hidden dim to embedding dim. + num_register_tokens (int): Number of register tokens. + block_fn (nn.Module): The block type used for attention (Block by default). + qkv_bias (bool): Whether to include bias in QKV projections. + proj_bias (bool): Whether to include bias in the output projection. + ffn_bias (bool): Whether to include bias in MLP layers. + patch_embed (str): Type of patch embed. e.g., "conv" or "dinov2_vitl14_reg". + aa_order (list[str]): The order of alternating attention, e.g. ["frame", "global"]. + aa_block_size (int): How many blocks to group under each attention type before switching. If not necessary, set to 1. + qk_norm (bool): Whether to apply QK normalization. + rope_freq (int): Base frequency for rotary embedding. -1 to disable. + init_values (float): Init scale for layer scale. + """ + + def __init__( + self, + img_size=518, + patch_size=14, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4.0, + num_register_tokens=4, + block_fn=Block, + qkv_bias=True, + proj_bias=True, + ffn_bias=True, + patch_embed="dinov2_vitl14_reg", + aa_order=["frame", "global"], + aa_block_size=1, + qk_norm=True, + rope_freq=100, + init_values=0.01, + ): + super().__init__() + + self.__build_patch_embed__(patch_embed, img_size, patch_size, num_register_tokens, embed_dim=embed_dim) + + # Initialize rotary position embedding if frequency > 0 + self.rope = RotaryPositionEmbedding2D(frequency=rope_freq) if rope_freq > 0 else None + self.position_getter = PositionGetter() if self.rope is not None else None + + self.frame_blocks = nn.ModuleList( + [ + block_fn( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_bias=proj_bias, + ffn_bias=ffn_bias, + init_values=init_values, + qk_norm=qk_norm, + rope=self.rope, + ) + for _ in range(depth) + ] + ) + + self.global_blocks = nn.ModuleList( + [ + block_fn( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_bias=proj_bias, + ffn_bias=ffn_bias, + init_values=init_values, + qk_norm=qk_norm, + rope=self.rope, + ) + for _ in range(depth) + ] + ) + + self.depth = depth + self.aa_order = aa_order + self.patch_size = patch_size + self.aa_block_size = aa_block_size + + # Validate that depth is divisible by aa_block_size + if self.depth % self.aa_block_size != 0: + raise ValueError(f"depth ({depth}) must be divisible by aa_block_size ({aa_block_size})") + + self.aa_block_num = self.depth // self.aa_block_size + + # Note: We have two camera tokens, one for the first frame and one for the rest + # The same applies for register tokens + self.camera_token 
= nn.Parameter(torch.randn(1, 2, 1, embed_dim)) + self.register_token = nn.Parameter(torch.randn(1, 2, num_register_tokens, embed_dim)) + + # The patch tokens start after the camera and register tokens + self.patch_start_idx = 1 + num_register_tokens + + # Initialize parameters with small values + nn.init.normal_(self.camera_token, std=1e-6) + nn.init.normal_(self.register_token, std=1e-6) + + # Register normalization constants as buffers + for name, value in ( + ("_resnet_mean", _RESNET_MEAN), + ("_resnet_std", _RESNET_STD), + ): + self.register_buffer( + name, + torch.FloatTensor(value).view(1, 1, 3, 1, 1), + persistent=False, + ) + + def __build_patch_embed__( + self, + patch_embed, + img_size, + patch_size, + num_register_tokens, + interpolate_antialias=True, + interpolate_offset=0.0, + block_chunks=0, + init_values=1.0, + embed_dim=1024, + ): + """ + Build the patch embed layer. If 'conv', we use a + simple PatchEmbed conv layer. Otherwise, we use a vision transformer. + """ + + if "conv" in patch_embed: + self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=3, embed_dim=embed_dim) + else: + vit_models = { + "dinov2_vitl14_reg": vit_large, + "dinov2_vitb14_reg": vit_base, + "dinov2_vits14_reg": vit_small, + "dinov2_vitg2_reg": vit_giant2, + } + + self.patch_embed = vit_models[patch_embed]( + img_size=img_size, + patch_size=patch_size, + num_register_tokens=num_register_tokens, + interpolate_antialias=interpolate_antialias, + interpolate_offset=interpolate_offset, + block_chunks=block_chunks, + init_values=init_values, + ) + + # Disable gradient updates for mask token + if hasattr(self.patch_embed, "mask_token"): + self.patch_embed.mask_token.requires_grad_(False) + + def forward( + self, + images: torch.Tensor, + ) -> Tuple[List[torch.Tensor], int]: + """ + Args: + images (torch.Tensor): Input images with shape [B, S, 3, H, W], in range [0, 1]. + B: batch size, S: sequence length, 3: RGB channels, H: height, W: width + + Returns: + (list[torch.Tensor], int): + The list of outputs from the attention blocks, + and the patch_start_idx indicating where patch tokens begin. 
+ """ + B, S, C_in, H, W = images.shape + + if C_in != 3: + raise ValueError(f"Expected 3 input channels, got {C_in}") + + # Normalize images and reshape for patch embed + images = (images - self._resnet_mean) / self._resnet_std + + # Reshape to [B*S, C, H, W] for patch embedding + images = images.view(B * S, C_in, H, W) + patch_tokens = self.patch_embed(images) + + if isinstance(patch_tokens, dict): + patch_tokens = patch_tokens["x_norm_patchtokens"] + + _, P, C = patch_tokens.shape + + # Expand camera and register tokens to match batch size and sequence length + camera_token = slice_expand_and_flatten(self.camera_token, B, S) + register_token = slice_expand_and_flatten(self.register_token, B, S) + + # Concatenate special tokens with patch tokens + tokens = torch.cat([camera_token, register_token, patch_tokens], dim=1) + + pos = None + if self.rope is not None: + pos = self.position_getter(B * S, H // self.patch_size, W // self.patch_size, device=images.device) + + if self.patch_start_idx > 0: + # do not use position embedding for special tokens (camera and register tokens) + # so set pos to 0 for the special tokens + pos = pos + 1 + pos_special = torch.zeros(B * S, self.patch_start_idx, 2).to(images.device).to(pos.dtype) + pos = torch.cat([pos_special, pos], dim=1) + + # update P because we added special tokens + _, P, C = tokens.shape + + frame_idx = 0 + global_idx = 0 + output_list = [] + + for _ in range(self.aa_block_num): + for attn_type in self.aa_order: + if attn_type == "frame": + tokens, frame_idx, frame_intermediates = self._process_frame_attention( + tokens, B, S, P, C, frame_idx, pos=pos + ) + elif attn_type == "global": + tokens, global_idx, global_intermediates = self._process_global_attention( + tokens, B, S, P, C, global_idx, pos=pos + ) + else: + raise ValueError(f"Unknown attention type: {attn_type}") + + for i in range(len(frame_intermediates)): + # concat frame and global intermediates, [B x S x P x 2C] + concat_inter = torch.cat([frame_intermediates[i], global_intermediates[i]], dim=-1) + output_list.append(concat_inter) + + del concat_inter + del frame_intermediates + del global_intermediates + return output_list, self.patch_start_idx + + def _process_frame_attention(self, tokens, B, S, P, C, frame_idx, pos=None): + """ + Process frame attention blocks. We keep tokens in shape (B*S, P, C). + """ + # If needed, reshape tokens or positions: + if tokens.shape != (B * S, P, C): + tokens = tokens.view(B, S, P, C).view(B * S, P, C) + + if pos is not None and pos.shape != (B * S, P, 2): + pos = pos.view(B, S, P, 2).view(B * S, P, 2) + + intermediates = [] + + # by default, self.aa_block_size=1, which processes one block at a time + for _ in range(self.aa_block_size): + tokens = self.frame_blocks[frame_idx](tokens, pos=pos) + frame_idx += 1 + intermediates.append(tokens.view(B, S, P, C)) + + return tokens, frame_idx, intermediates + + def _process_global_attention(self, tokens, B, S, P, C, global_idx, pos=None): + """ + Process global attention blocks. We keep tokens in shape (B, S*P, C). 
+ """ + if tokens.shape != (B, S * P, C): + tokens = tokens.view(B, S, P, C).view(B, S * P, C) + + if pos is not None and pos.shape != (B, S * P, 2): + pos = pos.view(B, S, P, 2).view(B, S * P, 2) + + intermediates = [] + + # by default, self.aa_block_size=1, which processes one block at a time + for _ in range(self.aa_block_size): + tokens = self.global_blocks[global_idx](tokens, pos=pos) + global_idx += 1 + intermediates.append(tokens.view(B, S, P, C)) + + return tokens, global_idx, intermediates + + +def slice_expand_and_flatten(token_tensor, B, S): + """ + Processes specialized tokens with shape (1, 2, X, C) for multi-frame processing: + 1) Uses the first position (index=0) for the first frame only + 2) Uses the second position (index=1) for all remaining frames (S-1 frames) + 3) Expands both to match batch size B + 4) Concatenates to form (B, S, X, C) where each sequence has 1 first-position token + followed by (S-1) second-position tokens + 5) Flattens to (B*S, X, C) for processing + + Returns: + torch.Tensor: Processed tokens with shape (B*S, X, C) + """ + + # Slice out the "query" tokens => shape (1, 1, ...) + query = token_tensor[:, 0:1, ...].expand(B, 1, *token_tensor.shape[2:]) + # Slice out the "other" tokens => shape (1, S-1, ...) + others = token_tensor[:, 1:, ...].expand(B, S - 1, *token_tensor.shape[2:]) + # Concatenate => shape (B, S, ...) + combined = torch.cat([query, others], dim=1) + + # Finally flatten => shape (B*S, ...) + combined = combined.view(B * S, *combined.shape[2:]) + return combined diff --git a/vggt/models/vggt.py b/vggt/models/vggt.py new file mode 100644 index 0000000000000000000000000000000000000000..75587dc2cd16ca54466c0200dbfebff06578dbe3 --- /dev/null +++ b/vggt/models/vggt.py @@ -0,0 +1,96 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn +from huggingface_hub import PyTorchModelHubMixin # used for model hub + +from vggt.models.aggregator import Aggregator +from vggt.heads.camera_head import CameraHead +from vggt.heads.dpt_head import DPTHead +from vggt.heads.track_head import TrackHead + + +class VGGT(nn.Module, PyTorchModelHubMixin): + def __init__(self, img_size=518, patch_size=14, embed_dim=1024): + super().__init__() + + self.aggregator = Aggregator(img_size=img_size, patch_size=patch_size, embed_dim=embed_dim) + self.camera_head = CameraHead(dim_in=2 * embed_dim) + self.point_head = DPTHead(dim_in=2 * embed_dim, output_dim=4, activation="inv_log", conf_activation="expp1") + self.depth_head = DPTHead(dim_in=2 * embed_dim, output_dim=2, activation="exp", conf_activation="expp1") + self.track_head = TrackHead(dim_in=2 * embed_dim, patch_size=patch_size) + + def forward( + self, + images: torch.Tensor, + query_points: torch.Tensor = None, + ): + """ + Forward pass of the VGGT model. + + Args: + images (torch.Tensor): Input images with shape [S, 3, H, W] or [B, S, 3, H, W], in range [0, 1]. + B: batch size, S: sequence length, 3: RGB channels, H: height, W: width + query_points (torch.Tensor, optional): Query points for tracking, in pixel coordinates. + Shape: [N, 2] or [B, N, 2], where N is the number of query points. 
+ Default: None + + Returns: + dict: A dictionary containing the following predictions: + - pose_enc (torch.Tensor): Camera pose encoding with shape [B, S, 9] (from the last iteration) + - depth (torch.Tensor): Predicted depth maps with shape [B, S, H, W, 1] + - depth_conf (torch.Tensor): Confidence scores for depth predictions with shape [B, S, H, W] + - world_points (torch.Tensor): 3D world coordinates for each pixel with shape [B, S, H, W, 3] + - world_points_conf (torch.Tensor): Confidence scores for world points with shape [B, S, H, W] + - images (torch.Tensor): Original input images, preserved for visualization + + If query_points is provided, also includes: + - track (torch.Tensor): Point tracks with shape [B, S, N, 2] (from the last iteration), in pixel coordinates + - vis (torch.Tensor): Visibility scores for tracked points with shape [B, S, N] + - conf (torch.Tensor): Confidence scores for tracked points with shape [B, S, N] + """ + + # If without batch dimension, add it + if len(images.shape) == 4: + images = images.unsqueeze(0) + if query_points is not None and len(query_points.shape) == 2: + query_points = query_points.unsqueeze(0) + + aggregated_tokens_list, patch_start_idx = self.aggregator(images) + + predictions = {} + + with torch.cuda.amp.autocast(enabled=False): + if self.camera_head is not None: + pose_enc_list = self.camera_head(aggregated_tokens_list) + predictions["pose_enc"] = pose_enc_list[-1] # pose encoding of the last iteration + + if self.depth_head is not None: + depth, depth_conf = self.depth_head( + aggregated_tokens_list, images=images, patch_start_idx=patch_start_idx + ) + predictions["depth"] = depth + predictions["depth_conf"] = depth_conf + + if self.point_head is not None: + pts3d, pts3d_conf = self.point_head( + aggregated_tokens_list, images=images, patch_start_idx=patch_start_idx + ) + predictions["world_points"] = pts3d + predictions["world_points_conf"] = pts3d_conf + + if self.track_head is not None and query_points is not None: + track_list, vis, conf = self.track_head( + aggregated_tokens_list, images=images, patch_start_idx=patch_start_idx, query_points=query_points + ) + predictions["track"] = track_list[-1] # track of the last iteration + predictions["vis"] = vis + predictions["conf"] = conf + + predictions["images"] = images + + return predictions diff --git a/vggt/utils/geometry.py b/vggt/utils/geometry.py new file mode 100644 index 0000000000000000000000000000000000000000..8ebd25dbc6cac6b0095956524c4f0628410dd5cb --- /dev/null +++ b/vggt/utils/geometry.py @@ -0,0 +1,166 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import os +import torch +import numpy as np + + +def unproject_depth_map_to_point_map( + depth_map: np.ndarray, extrinsics_cam: np.ndarray, intrinsics_cam: np.ndarray +) -> np.ndarray: + """ + Unproject a batch of depth maps to 3D world coordinates. 
+ + Args: + depth_map (np.ndarray): Batch of depth maps of shape (S, H, W, 1) or (S, H, W) + extrinsics_cam (np.ndarray): Batch of camera extrinsic matrices of shape (S, 3, 4) + intrinsics_cam (np.ndarray): Batch of camera intrinsic matrices of shape (S, 3, 3) + + Returns: + np.ndarray: Batch of 3D world coordinates of shape (S, H, W, 3) + """ + if isinstance(depth_map, torch.Tensor): + depth_map = depth_map.cpu().numpy() + if isinstance(extrinsics_cam, torch.Tensor): + extrinsics_cam = extrinsics_cam.cpu().numpy() + if isinstance(intrinsics_cam, torch.Tensor): + intrinsics_cam = intrinsics_cam.cpu().numpy() + + world_points_list = [] + for frame_idx in range(depth_map.shape[0]): + cur_world_points, _, _ = depth_to_world_coords_points( + depth_map[frame_idx].squeeze(-1), extrinsics_cam[frame_idx], intrinsics_cam[frame_idx] + ) + world_points_list.append(cur_world_points) + world_points_array = np.stack(world_points_list, axis=0) + + return world_points_array + + +def depth_to_world_coords_points( + depth_map: np.ndarray, + extrinsic: np.ndarray, + intrinsic: np.ndarray, + eps=1e-8, +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Convert a depth map to world coordinates. + + Args: + depth_map (np.ndarray): Depth map of shape (H, W). + intrinsic (np.ndarray): Camera intrinsic matrix of shape (3, 3). + extrinsic (np.ndarray): Camera extrinsic matrix of shape (3, 4). OpenCV camera coordinate convention, cam from world. + + Returns: + tuple[np.ndarray, np.ndarray]: World coordinates (H, W, 3) and valid depth mask (H, W). + """ + if depth_map is None: + return None, None, None + + # Valid depth mask + point_mask = depth_map > eps + + # Convert depth map to camera coordinates + cam_coords_points = depth_to_cam_coords_points(depth_map, intrinsic) + + # Multiply with the inverse of extrinsic matrix to transform to world coordinates + # extrinsic_inv is 4x4 (note closed_form_inverse_OpenCV is batched, the output is (N, 4, 4)) + cam_to_world_extrinsic = closed_form_inverse_se3(extrinsic[None])[0] + + R_cam_to_world = cam_to_world_extrinsic[:3, :3] + t_cam_to_world = cam_to_world_extrinsic[:3, 3] + + # Apply the rotation and translation to the camera coordinates + world_coords_points = np.dot(cam_coords_points, R_cam_to_world.T) + t_cam_to_world # HxWx3, 3x3 -> HxWx3 + # world_coords_points = np.einsum("ij,hwj->hwi", R_cam_to_world, cam_coords_points) + t_cam_to_world + + return world_coords_points, cam_coords_points, point_mask + + +def depth_to_cam_coords_points(depth_map: np.ndarray, intrinsic: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """ + Convert a depth map to camera coordinates. + + Args: + depth_map (np.ndarray): Depth map of shape (H, W). + intrinsic (np.ndarray): Camera intrinsic matrix of shape (3, 3). 
+ + Returns: + tuple[np.ndarray, np.ndarray]: Camera coordinates (H, W, 3) + """ + H, W = depth_map.shape + assert intrinsic.shape == (3, 3), "Intrinsic matrix must be 3x3" + assert intrinsic[0, 1] == 0 and intrinsic[1, 0] == 0, "Intrinsic matrix must have zero skew" + + # Intrinsic parameters + fu, fv = intrinsic[0, 0], intrinsic[1, 1] + cu, cv = intrinsic[0, 2], intrinsic[1, 2] + + # Generate grid of pixel coordinates + u, v = np.meshgrid(np.arange(W), np.arange(H)) + + # Unproject to camera coordinates + x_cam = (u - cu) * depth_map / fu + y_cam = (v - cv) * depth_map / fv + z_cam = depth_map + + # Stack to form camera coordinates + cam_coords = np.stack((x_cam, y_cam, z_cam), axis=-1).astype(np.float32) + + return cam_coords + + +def closed_form_inverse_se3(se3, R=None, T=None): + """ + Compute the inverse of each 4x4 (or 3x4) SE3 matrix in a batch. + + If `R` and `T` are provided, they must correspond to the rotation and translation + components of `se3`. Otherwise, they will be extracted from `se3`. + + Args: + se3: Nx4x4 or Nx3x4 array or tensor of SE3 matrices. + R (optional): Nx3x3 array or tensor of rotation matrices. + T (optional): Nx3x1 array or tensor of translation vectors. + + Returns: + Inverted SE3 matrices with the same type and device as `se3`. + + Shapes: + se3: (N, 4, 4) + R: (N, 3, 3) + T: (N, 3, 1) + """ + # Check if se3 is a numpy array or a torch tensor + is_numpy = isinstance(se3, np.ndarray) + + # Validate shapes + if se3.shape[-2:] != (4, 4) and se3.shape[-2:] != (3, 4): + raise ValueError(f"se3 must be of shape (N,4,4), got {se3.shape}.") + + # Extract R and T if not provided + if R is None: + R = se3[:, :3, :3] # (N,3,3) + if T is None: + T = se3[:, :3, 3:] # (N,3,1) + + # Transpose R + if is_numpy: + # Compute the transpose of the rotation for NumPy + R_transposed = np.transpose(R, (0, 2, 1)) + # -R^T t for NumPy + top_right = -np.matmul(R_transposed, T) + inverted_matrix = np.tile(np.eye(4), (len(R), 1, 1)) + else: + R_transposed = R.transpose(1, 2) # (N,3,3) + top_right = -torch.bmm(R_transposed, T) # (N,3,1) + inverted_matrix = torch.eye(4, 4)[None].repeat(len(R), 1, 1) + inverted_matrix = inverted_matrix.to(R.dtype).to(R.device) + + inverted_matrix[:, :3, :3] = R_transposed + inverted_matrix[:, :3, 3:] = top_right + + return inverted_matrix diff --git a/vggt/utils/load_fn.py b/vggt/utils/load_fn.py new file mode 100644 index 0000000000000000000000000000000000000000..d786e98a950f880342da9a13664be4fa32eb0bfa --- /dev/null +++ b/vggt/utils/load_fn.py @@ -0,0 +1,146 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +from PIL import Image +from torchvision import transforms as TF + + +def load_and_preprocess_images(image_path_list, mode="crop"): + """ + A quick start function to load and preprocess images for model input. + This assumes the images should have the same shape for easier batching, but our model can also work well with different shapes. + + Args: + image_path_list (list): List of paths to image files + mode (str, optional): Preprocessing mode, either "crop" or "pad". + - "crop" (default): Sets width to 518px and center crops height if needed. + - "pad": Preserves all pixels by making the largest dimension 518px + and padding the smaller dimension to reach a square shape. 
+ + Returns: + torch.Tensor: Batched tensor of preprocessed images with shape (N, 3, H, W) + + Raises: + ValueError: If the input list is empty or if mode is invalid + + Notes: + - Images with different dimensions will be padded with white (value=1.0) + - A warning is printed when images have different shapes + - When mode="crop": The function ensures width=518px while maintaining aspect ratio + and height is center-cropped if larger than 518px + - When mode="pad": The function ensures the largest dimension is 518px while maintaining aspect ratio + and the smaller dimension is padded to reach a square shape (518x518) + - Dimensions are adjusted to be divisible by 14 for compatibility with model requirements + """ + # Check for empty list + if len(image_path_list) == 0: + raise ValueError("At least 1 image is required") + + # Validate mode + if mode not in ["crop", "pad"]: + raise ValueError("Mode must be either 'crop' or 'pad'") + + images = [] + shapes = set() + to_tensor = TF.ToTensor() + target_size = 518 + + # First process all images and collect their shapes + for image_path in image_path_list: + + # Open image + img = Image.open(image_path) + + # If there's an alpha channel, blend onto white background: + if img.mode == "RGBA": + # Create white background + background = Image.new("RGBA", img.size, (255, 255, 255, 255)) + # Alpha composite onto the white background + img = Image.alpha_composite(background, img) + + # Now convert to "RGB" (this step assigns white for transparent areas) + img = img.convert("RGB") + + width, height = img.size + + if mode == "pad": + # Make the largest dimension 518px while maintaining aspect ratio + if width >= height: + new_width = target_size + new_height = round(height * (new_width / width) / 14) * 14 # Make divisible by 14 + else: + new_height = target_size + new_width = round(width * (new_height / height) / 14) * 14 # Make divisible by 14 + else: # mode == "crop" + # Original behavior: set width to 518px + new_width = target_size + # Calculate height maintaining aspect ratio, divisible by 14 + new_height = round(height * (new_width / width) / 14) * 14 + + # Resize with new dimensions (width, height) + img = img.resize((new_width, new_height), Image.Resampling.BICUBIC) + img = to_tensor(img) # Convert to tensor (0, 1) + + # Center crop height if it's larger than 518 (only in crop mode) + if mode == "crop" and new_height > target_size: + start_y = (new_height - target_size) // 2 + img = img[:, start_y : start_y + target_size, :] + + # For pad mode, pad to make a square of target_size x target_size + if mode == "pad": + h_padding = target_size - img.shape[1] + w_padding = target_size - img.shape[2] + + if h_padding > 0 or w_padding > 0: + pad_top = h_padding // 2 + pad_bottom = h_padding - pad_top + pad_left = w_padding // 2 + pad_right = w_padding - pad_left + + # Pad with white (value=1.0) + img = torch.nn.functional.pad( + img, (pad_left, pad_right, pad_top, pad_bottom), mode="constant", value=1.0 + ) + + shapes.add((img.shape[1], img.shape[2])) + images.append(img) + + # Check if we have different shapes + # In theory our model can also work well with different shapes + if len(shapes) > 1: + print(f"Warning: Found images with different shapes: {shapes}") + # Find maximum dimensions + max_height = max(shape[0] for shape in shapes) + max_width = max(shape[1] for shape in shapes) + + # Pad images if necessary + padded_images = [] + for img in images: + h_padding = max_height - img.shape[1] + w_padding = max_width - img.shape[2] + + if h_padding > 0 or 
w_padding > 0: + pad_top = h_padding // 2 + pad_bottom = h_padding - pad_top + pad_left = w_padding // 2 + pad_right = w_padding - pad_left + + img = torch.nn.functional.pad( + img, (pad_left, pad_right, pad_top, pad_bottom), mode="constant", value=1.0 + ) + padded_images.append(img) + images = padded_images + + images = torch.stack(images) # concatenate images + + # Ensure correct shape when single image + if len(image_path_list) == 1: + # Verify shape is (1, C, H, W) + if images.dim() == 3: + images = images.unsqueeze(0) + + return images diff --git a/vggt/utils/pose_enc.py b/vggt/utils/pose_enc.py new file mode 100644 index 0000000000000000000000000000000000000000..2f98b0878cb13451b8cdb80074349cbf2644c5fa --- /dev/null +++ b/vggt/utils/pose_enc.py @@ -0,0 +1,130 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +from .rotation import quat_to_mat, mat_to_quat + + +def extri_intri_to_pose_encoding( + extrinsics, + intrinsics, + image_size_hw=None, # e.g., (256, 512) + pose_encoding_type="absT_quaR_FoV", +): + """Convert camera extrinsics and intrinsics to a compact pose encoding. + + This function transforms camera parameters into a unified pose encoding format, + which can be used for various downstream tasks like pose prediction or representation. + + Args: + extrinsics (torch.Tensor): Camera extrinsic parameters with shape BxSx3x4, + where B is batch size and S is sequence length. + In OpenCV coordinate system (x-right, y-down, z-forward), representing camera from world transformation. + The format is [R|t] where R is a 3x3 rotation matrix and t is a 3x1 translation vector. + intrinsics (torch.Tensor): Camera intrinsic parameters with shape BxSx3x3. + Defined in pixels, with format: + [[fx, 0, cx], + [0, fy, cy], + [0, 0, 1]] + where fx, fy are focal lengths and (cx, cy) is the principal point + image_size_hw (tuple): Tuple of (height, width) of the image in pixels. + Required for computing field of view values. For example: (256, 512). + pose_encoding_type (str): Type of pose encoding to use. Currently only + supports "absT_quaR_FoV" (absolute translation, quaternion rotation, field of view). + + Returns: + torch.Tensor: Encoded camera pose parameters with shape BxSx9. + For "absT_quaR_FoV" type, the 9 dimensions are: + - [:3] = absolute translation vector T (3D) + - [3:7] = rotation as quaternion quat (4D) + - [7:] = field of view (2D) + """ + + # extrinsics: BxSx3x4 + # intrinsics: BxSx3x3 + + if pose_encoding_type == "absT_quaR_FoV": + R = extrinsics[:, :, :3, :3] # BxSx3x3 + T = extrinsics[:, :, :3, 3] # BxSx3 + + quat = mat_to_quat(R) + # Note the order of h and w here + H, W = image_size_hw + fov_h = 2 * torch.atan((H / 2) / intrinsics[..., 1, 1]) + fov_w = 2 * torch.atan((W / 2) / intrinsics[..., 0, 0]) + pose_encoding = torch.cat([T, quat, fov_h[..., None], fov_w[..., None]], dim=-1).float() + else: + raise NotImplementedError + + return pose_encoding + + +def pose_encoding_to_extri_intri( + pose_encoding, + image_size_hw=None, # e.g., (256, 512) + pose_encoding_type="absT_quaR_FoV", + build_intrinsics=True, +): + """Convert a pose encoding back to camera extrinsics and intrinsics. + + This function performs the inverse operation of extri_intri_to_pose_encoding, + reconstructing the full camera parameters from the compact encoding. 
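+    The focal lengths are recovered from the field of view as
+    f = (size / 2) / tan(fov / 2), and the principal point is assumed to be the image center.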
+ + Args: + pose_encoding (torch.Tensor): Encoded camera pose parameters with shape BxSx9, + where B is batch size and S is sequence length. + For "absT_quaR_FoV" type, the 9 dimensions are: + - [:3] = absolute translation vector T (3D) + - [3:7] = rotation as quaternion quat (4D) + - [7:] = field of view (2D) + image_size_hw (tuple): Tuple of (height, width) of the image in pixels. + Required for reconstructing intrinsics from field of view values. + For example: (256, 512). + pose_encoding_type (str): Type of pose encoding used. Currently only + supports "absT_quaR_FoV" (absolute translation, quaternion rotation, field of view). + build_intrinsics (bool): Whether to reconstruct the intrinsics matrix. + If False, only extrinsics are returned and intrinsics will be None. + + Returns: + tuple: (extrinsics, intrinsics) + - extrinsics (torch.Tensor): Camera extrinsic parameters with shape BxSx3x4. + In OpenCV coordinate system (x-right, y-down, z-forward), representing camera from world + transformation. The format is [R|t] where R is a 3x3 rotation matrix and t is + a 3x1 translation vector. + - intrinsics (torch.Tensor or None): Camera intrinsic parameters with shape BxSx3x3, + or None if build_intrinsics is False. Defined in pixels, with format: + [[fx, 0, cx], + [0, fy, cy], + [0, 0, 1]] + where fx, fy are focal lengths and (cx, cy) is the principal point, + assumed to be at the center of the image (W/2, H/2). + """ + + intrinsics = None + + if pose_encoding_type == "absT_quaR_FoV": + T = pose_encoding[..., :3] + quat = pose_encoding[..., 3:7] + fov_h = pose_encoding[..., 7] + fov_w = pose_encoding[..., 8] + + R = quat_to_mat(quat) + extrinsics = torch.cat([R, T[..., None]], dim=-1) + + if build_intrinsics: + H, W = image_size_hw + fy = (H / 2.0) / torch.tan(fov_h / 2.0) + fx = (W / 2.0) / torch.tan(fov_w / 2.0) + intrinsics = torch.zeros(pose_encoding.shape[:2] + (3, 3), device=pose_encoding.device) + intrinsics[..., 0, 0] = fx + intrinsics[..., 1, 1] = fy + intrinsics[..., 0, 2] = W / 2 + intrinsics[..., 1, 2] = H / 2 + intrinsics[..., 2, 2] = 1.0 # Set the homogeneous coordinate to 1 + else: + raise NotImplementedError + + return extrinsics, intrinsics diff --git a/vggt/utils/rotation.py b/vggt/utils/rotation.py new file mode 100644 index 0000000000000000000000000000000000000000..657583e6915437c824c192d51939990b589a14fa --- /dev/null +++ b/vggt/utils/rotation.py @@ -0,0 +1,138 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# Modified from PyTorch3D, https://github.com/facebookresearch/pytorch3d + +import torch +import numpy as np +import torch.nn.functional as F + + +def quat_to_mat(quaternions: torch.Tensor) -> torch.Tensor: + """ + Quaternion Order: XYZW or say ijkr, scalar-last + + Convert rotations given as quaternions to rotation matrices. + Args: + quaternions: quaternions with real part last, + as tensor of shape (..., 4). + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + i, j, k, r = torch.unbind(quaternions, -1) + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. 
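+    # two_s = 2 / |q|^2, so the formula below also yields a valid rotation for non-unit quaternions.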
+ two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +def mat_to_quat(matrix: torch.Tensor) -> torch.Tensor: + """ + Convert rotations given as rotation matrices to quaternions. + + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + + Returns: + quaternions with real part last, as tensor of shape (..., 4). + Quaternion Order: XYZW or say ijkr, scalar-last + """ + if matrix.size(-1) != 3 or matrix.size(-2) != 3: + raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.") + + batch_dim = matrix.shape[:-2] + m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(matrix.reshape(batch_dim + (9,)), dim=-1) + + q_abs = _sqrt_positive_part( + torch.stack( + [ + 1.0 + m00 + m11 + m22, + 1.0 + m00 - m11 - m22, + 1.0 - m00 + m11 - m22, + 1.0 - m00 - m11 + m22, + ], + dim=-1, + ) + ) + + # we produce the desired quaternion multiplied by each of r, i, j, k + quat_by_rijk = torch.stack( + [ + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and + # `int`. + torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1), + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and + # `int`. + torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1), + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and + # `int`. + torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1), + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and + # `int`. + torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1), + ], + dim=-2, + ) + + # We floor here at 0.1 but the exact level is not important; if q_abs is small, + # the candidate won't be picked. + flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device) + quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr)) + + # if not for numerical problems, quat_candidates[i] should be same (up to a sign), + # forall i; we pick the best-conditioned one (with the largest denominator) + out = quat_candidates[F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :].reshape(batch_dim + (4,)) + + # Convert from rijk to ijkr + out = out[..., [1, 2, 3, 0]] + + out = standardize_quaternion(out) + + return out + + +def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor: + """ + Returns torch.sqrt(torch.max(0, x)) + but with a zero subgradient where x is 0. + """ + ret = torch.zeros_like(x) + positive_mask = x > 0 + if torch.is_grad_enabled(): + ret[positive_mask] = torch.sqrt(x[positive_mask]) + else: + ret = torch.where(positive_mask, torch.sqrt(x), ret) + return ret + + +def standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor: + """ + Convert a unit quaternion to a standard form: one in which the real + part is non negative. + + Args: + quaternions: Quaternions with real part last, + as tensor of shape (..., 4). + + Returns: + Standardized quaternions as tensor of shape (..., 4). 
+ """ + return torch.where(quaternions[..., 3:4] < 0, -quaternions, quaternions) diff --git a/vggt/utils/visual_track.py b/vggt/utils/visual_track.py new file mode 100644 index 0000000000000000000000000000000000000000..796c114ccba00b5f7850e04b9444a6cd5c44b154 --- /dev/null +++ b/vggt/utils/visual_track.py @@ -0,0 +1,239 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import cv2 +import torch +import numpy as np +import os + + +def color_from_xy(x, y, W, H, cmap_name="hsv"): + """ + Map (x, y) -> color in (R, G, B). + 1) Normalize x,y to [0,1]. + 2) Combine them into a single scalar c in [0,1]. + 3) Use matplotlib's colormap to convert c -> (R,G,B). + + You can customize step 2, e.g., c = (x + y)/2, or some function of (x, y). + """ + import matplotlib.cm + import matplotlib.colors + + x_norm = x / max(W - 1, 1) + y_norm = y / max(H - 1, 1) + # Simple combination: + c = (x_norm + y_norm) / 2.0 + + cmap = matplotlib.cm.get_cmap(cmap_name) + # cmap(c) -> (r,g,b,a) in [0,1] + rgba = cmap(c) + r, g, b = rgba[0], rgba[1], rgba[2] + return (r, g, b) # in [0,1], RGB order + + +def get_track_colors_by_position(tracks_b, vis_mask_b=None, image_width=None, image_height=None, cmap_name="hsv"): + """ + Given all tracks in one sample (b), compute a (N,3) array of RGB color values + in [0,255]. The color is determined by the (x,y) position in the first + visible frame for each track. + + Args: + tracks_b: Tensor of shape (S, N, 2). (x,y) for each track in each frame. + vis_mask_b: (S, N) boolean mask; if None, assume all are visible. + image_width, image_height: used for normalizing (x, y). + cmap_name: for matplotlib (e.g., 'hsv', 'rainbow', 'jet'). + + Returns: + track_colors: np.ndarray of shape (N, 3), each row is (R,G,B) in [0,255]. + """ + S, N, _ = tracks_b.shape + track_colors = np.zeros((N, 3), dtype=np.uint8) + + if vis_mask_b is None: + # treat all as visible + vis_mask_b = torch.ones(S, N, dtype=torch.bool, device=tracks_b.device) + + for i in range(N): + # Find first visible frame for track i + visible_frames = torch.where(vis_mask_b[:, i])[0] + if len(visible_frames) == 0: + # track is never visible; just assign black or something + track_colors[i] = (0, 0, 0) + continue + + first_s = int(visible_frames[0].item()) + # use that frame's (x,y) + x, y = tracks_b[first_s, i].tolist() + + # map (x,y) -> (R,G,B) in [0,1] + r, g, b = color_from_xy(x, y, W=image_width, H=image_height, cmap_name=cmap_name) + # scale to [0,255] + r, g, b = int(r * 255), int(g * 255), int(b * 255) + track_colors[i] = (r, g, b) + + return track_colors + + +def visualize_tracks_on_images( + images, + tracks, + track_vis_mask=None, + out_dir="track_visuals_concat_by_xy", + image_format="CHW", # "CHW" or "HWC" + normalize_mode="[0,1]", + cmap_name="hsv", # e.g. "hsv", "rainbow", "jet" + frames_per_row=4, # New parameter for grid layout + save_grid=True, # Flag to control whether to save the grid image +): + """ + Visualizes frames in a grid layout with specified frames per row. + Each track's color is determined by its (x,y) position + in the first visible frame (or frame 0 if always visible). + Finally convert the BGR result to RGB before saving. + Also saves each individual frame as a separate PNG file. + + Args: + images: torch.Tensor (S, 3, H, W) if CHW or (S, H, W, 3) if HWC. + tracks: torch.Tensor (S, N, 2), last dim = (x, y). 
+ track_vis_mask: torch.Tensor (S, N) or None. + out_dir: folder to save visualizations. + image_format: "CHW" or "HWC". + normalize_mode: "[0,1]", "[-1,1]", or None for direct raw -> 0..255 + cmap_name: a matplotlib colormap name for color_from_xy. + frames_per_row: number of frames to display in each row of the grid. + save_grid: whether to save all frames in one grid image. + + Returns: + None (saves images in out_dir). + """ + + if len(tracks.shape) == 4: + tracks = tracks.squeeze(0) + images = images.squeeze(0) + if track_vis_mask is not None: + track_vis_mask = track_vis_mask.squeeze(0) + + import matplotlib + + matplotlib.use("Agg") # for non-interactive (optional) + + os.makedirs(out_dir, exist_ok=True) + + S = images.shape[0] + _, N, _ = tracks.shape # (S, N, 2) + + # Move to CPU + images = images.cpu().clone() + tracks = tracks.cpu().clone() + if track_vis_mask is not None: + track_vis_mask = track_vis_mask.cpu().clone() + + # Infer H, W from images shape + if image_format == "CHW": + # e.g. images[s].shape = (3, H, W) + H, W = images.shape[2], images.shape[3] + else: + # e.g. images[s].shape = (H, W, 3) + H, W = images.shape[1], images.shape[2] + + # Pre-compute the color for each track i based on first visible position + track_colors_rgb = get_track_colors_by_position( + tracks, # shape (S, N, 2) + vis_mask_b=track_vis_mask if track_vis_mask is not None else None, + image_width=W, + image_height=H, + cmap_name=cmap_name, + ) + + # We'll accumulate each frame's drawn image in a list + frame_images = [] + + for s in range(S): + # shape => either (3, H, W) or (H, W, 3) + img = images[s] + + # Convert to (H, W, 3) + if image_format == "CHW": + img = img.permute(1, 2, 0) # (H, W, 3) + # else "HWC", do nothing + + img = img.numpy().astype(np.float32) + + # Scale to [0,255] if needed + if normalize_mode == "[0,1]": + img = np.clip(img, 0, 1) * 255.0 + elif normalize_mode == "[-1,1]": + img = (img + 1.0) * 0.5 * 255.0 + img = np.clip(img, 0, 255.0) + # else no normalization + + # Convert to uint8 + img = img.astype(np.uint8) + + # For drawing in OpenCV, convert to BGR + img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + + # Draw each visible track + cur_tracks = tracks[s] # shape (N, 2) + if track_vis_mask is not None: + valid_indices = torch.where(track_vis_mask[s])[0] + else: + valid_indices = range(N) + + cur_tracks_np = cur_tracks.numpy() + for i in valid_indices: + x, y = cur_tracks_np[i] + pt = (int(round(x)), int(round(y))) + + # track_colors_rgb[i] is (R,G,B). 
For OpenCV circle, we need BGR + R, G, B = track_colors_rgb[i] + color_bgr = (int(B), int(G), int(R)) + cv2.circle(img_bgr, pt, radius=3, color=color_bgr, thickness=-1) + + # Convert back to RGB for consistent final saving: + img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB) + + # Save individual frame + frame_path = os.path.join(out_dir, f"frame_{s:04d}.png") + # Convert to BGR for OpenCV imwrite + frame_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR) + cv2.imwrite(frame_path, frame_bgr) + + frame_images.append(img_rgb) + + # Only create and save the grid image if save_grid is True + if save_grid: + # Calculate grid dimensions + num_rows = (S + frames_per_row - 1) // frames_per_row # Ceiling division + + # Create a grid of images + grid_img = None + for row in range(num_rows): + start_idx = row * frames_per_row + end_idx = min(start_idx + frames_per_row, S) + + # Concatenate this row horizontally + row_img = np.concatenate(frame_images[start_idx:end_idx], axis=1) + + # If this row has fewer than frames_per_row images, pad with black + if end_idx - start_idx < frames_per_row: + padding_width = (frames_per_row - (end_idx - start_idx)) * W + padding = np.zeros((H, padding_width, 3), dtype=np.uint8) + row_img = np.concatenate([row_img, padding], axis=1) + + # Add this row to the grid + if grid_img is None: + grid_img = row_img + else: + grid_img = np.concatenate([grid_img, row_img], axis=0) + + out_path = os.path.join(out_dir, "tracks_grid.png") + # Convert back to BGR for OpenCV imwrite + grid_img_bgr = cv2.cvtColor(grid_img, cv2.COLOR_RGB2BGR) + cv2.imwrite(out_path, grid_img_bgr) + print(f"[INFO] Saved color-by-XY track visualization grid -> {out_path}") + + print(f"[INFO] Saved {S} individual frames to {out_dir}/frame_*.png") diff --git a/vision_tower.py b/vision_tower.py new file mode 100644 index 0000000000000000000000000000000000000000..0a047d6ae065be86200a234d74993fce93a3deac --- /dev/null +++ b/vision_tower.py @@ -0,0 +1,279 @@ +# import sys +# sys.path.append("..") + +import torch +from torch import nn +import torch.nn.init as init +import torch.nn.functional as F + +from paths import * +from typing import Dict, List, Optional, Set, Tuple, Union +import os + +from contextlib import nullcontext +from vggt.models.vggt import VGGT +from vggt.utils.pose_enc import pose_encoding_to_extri_intri +from vggt.layers import Mlp +from vggt.layers.block import Block +from vggt.heads.head_act import activate_pose + +class OriAny_CameraHead(nn.Module): + """ + CameraHead predicts camera parameters from token representations using iterative refinement. + It applies a series of transformer blocks (the "trunk") to dedicated camera tokens. + """ + def __init__( + self, + dim_in: int = 2048, + trunk_depth: int = 4, + pose_encoding_type: str = "OriAny", + num_heads: int = 16, + mlp_ratio: int = 4, + init_values: float = 0.01, + ): + super().__init__() + + if pose_encoding_type == "OriAny": + self.target_dim = 360+180+360+2 + else: + raise ValueError(f"Unsupported camera encoding type: {pose_encoding_type}") + + self.trunk_depth = trunk_depth + + # Build the trunk using a sequence of transformer blocks. + self.trunk = nn.Sequential( + *[ + Block( + dim=dim_in, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + init_values=init_values, + ) + for _ in range(trunk_depth) + ] + ) + + # Normalizations for camera token and trunk output. + self.token_norm = nn.LayerNorm(dim_in) + self.trunk_norm = nn.LayerNorm(dim_in) + + # Learnable empty camera pose token. 
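+        # It is embedded and used as the trunk input on the first refinement iteration,
+        # before any pose prediction exists.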
+ self.empty_pose_tokens = nn.Parameter(torch.zeros(1, 1, self.target_dim)) + self.embed_pose = nn.Linear(self.target_dim, dim_in) + + # Module for producing modulation parameters: shift, scale, and a gate. + self.poseLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim_in, 3 * dim_in, bias=True)) + + # Adaptive layer normalization without affine parameters. + self.adaln_norm = nn.LayerNorm(dim_in, elementwise_affine=False, eps=1e-6) + self.pose_branch = Mlp( + in_features=dim_in, + hidden_features=dim_in // 2, + out_features=self.target_dim, + drop=0, + ) + + def forward(self, aggregated_tokens_list: list, num_iterations: int = 4) -> list: + """ + Forward pass to predict camera parameters. + + Args: + aggregated_tokens_list (list): List of token tensors from the network; + the last tensor is used for prediction. + num_iterations (int, optional): Number of iterative refinement steps. Defaults to 4. + + Returns: + list: A list of predicted camera encodings (post-activation) from each iteration. + """ + # Use tokens from the last block for camera prediction. + tokens = aggregated_tokens_list[-1] + + # Extract the camera tokens + pose_tokens = tokens[:, :, 0] + pose_tokens = self.token_norm(pose_tokens) + + pred_pose_enc_list = self.trunk_fn(pose_tokens, num_iterations) + return pred_pose_enc_list + + def trunk_fn(self, pose_tokens: torch.Tensor, num_iterations: int) -> list: + """ + Iteratively refine camera pose predictions. + + Args: + pose_tokens (torch.Tensor): Normalized camera tokens with shape [B, 1, C]. + num_iterations (int): Number of refinement iterations. + + Returns: + list: List of activated camera encodings from each iteration. + """ + B, S, C = pose_tokens.shape # S is expected to be 1. + pred_pose_enc = None + pred_pose_enc_list = [] + + for _ in range(num_iterations): + # Use a learned empty pose for the first iteration. + if pred_pose_enc is None: + module_input = self.embed_pose(self.empty_pose_tokens.expand(B, S, -1)) + else: + # Detach the previous prediction to avoid backprop through time. + pred_pose_enc = pred_pose_enc.detach() + module_input = self.embed_pose(pred_pose_enc) + + # Generate modulation parameters and split them into shift, scale, and gate components. + shift_msa, scale_msa, gate_msa = self.poseLN_modulation(module_input).chunk(3, dim=-1) + + # Adaptive layer normalization and modulation. + pose_tokens_modulated = gate_msa * modulate(self.adaln_norm(pose_tokens), shift_msa, scale_msa) + pose_tokens_modulated = pose_tokens_modulated + pose_tokens + + pose_tokens_modulated = self.trunk(pose_tokens_modulated) + # Compute the delta update for the pose encoding. + pred_pose_enc_delta = self.pose_branch(self.trunk_norm(pose_tokens_modulated)) + + if pred_pose_enc is None: + pred_pose_enc = pred_pose_enc_delta + else: + pred_pose_enc = pred_pose_enc + pred_pose_enc_delta + + # Apply final activation functions for translation, quaternion, and field-of-view. + # activated_pose = activate_pose( + # pred_pose_enc, + # trans_act=self.trans_act, + # quat_act=self.quat_act, + # fl_act=self.fl_act, + # ) + # pred_pose_enc_list.append(activated_pose) + pred_pose_enc_list.append(pred_pose_enc) + + return pred_pose_enc_list + +def modulate(x: torch.Tensor, shift: torch.Tensor, scale: torch.Tensor) -> torch.Tensor: + """ + Modulate the input tensor using scaling and shifting parameters. 
+    """
+    # modified from https://github.com/facebookresearch/DiT/blob/796c29e532f47bba17c5b9c5eb39b9354b8b7c64/models.py#L19
+    return x * (1 + scale) + shift
+
+def load_patch_embed_weights(model, checkpoint_path):
+    # 1. Load the checkpoint
+    checkpoint = torch.load(checkpoint_path, map_location="cpu")
+
+    # 2. Get the state_dict
+    state_dict = checkpoint.get("state_dict", checkpoint)
+
+    # 3. Keep only the parameters that belong to aggregator.patch_embed
+    patch_embed_state = {
+        k.replace("aggregator.patch_embed.", ""): v
+        for k, v in state_dict.items()
+        if k.startswith("aggregator.patch_embed.")
+    }
+
+    # 4. Load them into the target module
+    missing_keys, unexpected_keys = model.aggregator.patch_embed.load_state_dict(
+        patch_embed_state, strict=False
+    )
+
+    print("Loaded patch_embed weights.")
+    print("Missing keys:", missing_keys)
+    print("Unexpected keys:", unexpected_keys)
+
+class VGGT_OriAny_Ref(nn.Module):
+    def __init__(self,
+                 dtype,
+                 out_dim,
+                 nopretrain
+                 ) -> None:
+        super().__init__()
+        self.vggt = VGGT()
+
+        self.dtype = dtype
+        self.ref_sampler = MLP_dim(in_dim=2048, out_dim=out_dim)
+        self.ref_sampler.apply(init_weights)
+        self.tgt_sampler = MLP_dim(in_dim=2048, out_dim=out_dim)
+        self.tgt_sampler.apply(init_weights)
+
+    def forward(self, img_inputs):
+        device = self.get_device()
+
+        with torch.amp.autocast(device_type='cuda', dtype=self.dtype):
+            if img_inputs.dim() == 4:
+                # Add a batch dimension when a single (S, 3, H, W) sequence is passed.
+                img_inputs = img_inputs[None]
+            aggregated_tokens_list, ps_idx = self.vggt.aggregator(img_inputs)
+
+            # Predict Cameras
+            # pose_enc = self.oriany_camera_head(aggregated_tokens_list)[-1]
+            # Extrinsic and intrinsic matrices, following OpenCV convention (camera from world)
+            # extrinsic, intrinsic = pose_encoding_to_extri_intri(pose_enc, images.shape[-2:])
+
+            # Use tokens from the last block for camera prediction.
+            tokens = aggregated_tokens_list[-1]
+            # Extract the camera tokens
+            pose_tokens = tokens[:, :, 0]
+            # tokens = aggregated_tokens_list[-1]
+
+        B, S, C = pose_tokens.shape
+        if S > 1:
+            # Separate the first (reference) token from the remaining (target) tokens in each batch
+            ref_tokens = pose_tokens[:, 0, :]   # shape: (B, C)
+            tgt_tokens = pose_tokens[:, 1:, :]  # shape: (B, S-1, C)
+
+            # Project to the output dimension
+            ref_feat = self.ref_sampler(ref_tokens)  # shape: (B, C'), where C' is the sampler output dimension
+            tgt_feat = self.tgt_sampler(tgt_tokens.reshape(B * (S - 1), C))  # shape: (B*(S-1), C')
+
+            # Merge the results
+            pose_enc = torch.cat([
+                ref_feat.unsqueeze(1),       # (B, 1, C')
+                tgt_feat.view(B, S - 1, -1)  # (B, S-1, C')
+            ], dim=1)  # final shape: (B, S, C')
+        else:
+            pose_enc = self.ref_sampler(pose_tokens.view(B * S, C))
+        return pose_enc
+
+    def get_device(self):
+        return next(self.parameters()).device
+
+def init_weights(m):
+    if isinstance(m, nn.Linear):
+        init.xavier_uniform_(m.weight)
+        if m.bias is not None:
+            init.constant_(m.bias, 0)
+
+def get_activation(activation):
+    if activation.lower() == 'gelu':
+        return nn.GELU()
+    elif activation.lower() == 'rrelu':
+        return nn.RReLU(inplace=True)
+    elif activation.lower() == 'selu':
+        return nn.SELU(inplace=True)
+    elif activation.lower() == 'silu':
+        return nn.SiLU(inplace=True)
+    elif activation.lower() == 'hardswish':
+        return nn.Hardswish(inplace=True)
+    elif activation.lower() == 'leakyrelu':
+        return nn.LeakyReLU(inplace=True)
+    elif activation.lower() == 'sigmoid':
+        return nn.Sigmoid()
+    elif activation.lower() == 'tanh':
+        return nn.Tanh()
+    else:
+        return nn.ReLU(inplace=True)
+
+class MLP_dim(nn.Module):
+    def __init__(
+            self, in_dim=512, out_dim=1024, bias=True, activation='relu'):
+        super().__init__()
+        self.act = get_activation(activation)
+        self.net1 = nn.Sequential(
+            nn.Linear(in_dim, int(out_dim), bias=bias),
+            nn.BatchNorm1d(int(out_dim)),
+            self.act
+        )
+        self.net2 = nn.Sequential(
+            nn.Linear(int(out_dim), out_dim, bias=bias),
+            nn.BatchNorm1d(out_dim)
+        )
+
+    def forward(self, x):
+        return self.net2(self.net1(x))
+
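+# Illustrative usage sketch for MLP_dim; the dimensions below are example values only
+# (2048 matches the camera-token width consumed by VGGT_OriAny_Ref above, 512 is arbitrary).
+if __name__ == "__main__":
+    mlp = MLP_dim(in_dim=2048, out_dim=512, activation='relu')
+    dummy_tokens = torch.randn(4, 2048)  # e.g. four pooled camera tokens
+    with torch.no_grad():
+        out = mlp(dummy_tokens)
+    print(out.shape)  # torch.Size([4, 512])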