commit 6d62d873faafec671d3eb2188424778f84104a66 Author: nikitakaraevv Date: Mon Jul 17 17:49:06 2023 -0700 Initial commit diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..f913b6a --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,80 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic +address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. +Examples of representing a project or community include using an official +project e-mail address, posting via an official social media account, or acting +as an appointed representative at an online or offline event. Representation of +a project may be further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when there is a +reasonable belief that an individual's behavior may have a negative impact on +the project or its community. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at . All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..f3ed8c2 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# CoTracker +We want to make contributing to this project as easy and transparent as possible. + +## Pull Requests +We actively welcome your pull requests. + +1. Fork the repo and create your branch from `main`. +2. If you've changed APIs, update the documentation. +3. Make sure your code lints. +4. If you haven't already, complete the Contributor License Agreement ("CLA"). + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Meta's open source projects. + +Complete your CLA here: + +## Issues +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. + +## License +By contributing to CoTracker, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. \ No newline at end of file diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..e395ca3 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,399 @@ +Attribution-NonCommercial 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. 
This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-NonCommercial 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-NonCommercial 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. 
Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. NonCommercial means not primarily intended for or directed towards + commercial advantage or monetary compensation. For purposes of + this Public License, the exchange of the Licensed Material for + other material subject to Copyright and Similar Rights by digital + file-sharing or similar means is NonCommercial provided there is + no payment of monetary compensation in connection with the + exchange. + + j. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + k. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + l. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part, for NonCommercial purposes only; and + + b. produce, reproduce, and Share Adapted Material for + NonCommercial purposes only. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. 
Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties, including when + the Licensed Material is used other than for NonCommercial + purposes. + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + +Section 4 -- Sui Generis Database Rights. 
+ +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database for NonCommercial purposes + only; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + +Section 7 -- Other Terms and Conditions. + + a. 
The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..1185a50 --- /dev/null +++ b/README.md @@ -0,0 +1,94 @@ +# CoTracker: It is Better to Track Together + +**[Meta AI Research, FAIR](https://ai.facebook.com/research/)**; **[University of Oxford, VGG](https://www.robots.ox.ac.uk/~vgg/)** + +[Nikita Karaev](https://nikitakaraevv.github.io/), [Ignacio Rocco](https://www.irocco.info/), [Benjamin Graham](https://ai.facebook.com/people/benjamin-graham/), [Natalia Neverova](https://nneverova.github.io/), [Andrea Vedaldi](https://www.robots.ox.ac.uk/~vedaldi/), [Christian Rupprecht](https://chrirupp.github.io/) + +[[`Paper`]()] [[`Project`](https://co-tracker.github.io/)] [[`BibTeX`](#citing-cotracker)] + +![bmx-bumps](./assets/bmx-bumps.gif) + +**CoTracker** is a fast transformer-based model that can track any point in a video. It brings to tracking some of the benefits of Optical Flow. 
+ +CoTracker can track: +- **Every pixel** within a video +- Points sampled on a regular grid on any video frame +- Manually selected points + +Try these tracking modes for yourself with our [Colab demo](https://github.com/facebookresearch/co-tracker/notebooks/demo.ipynb). + + + +## Installation Instructions +Ensure you have both PyTorch and TorchVision installed on your system. Follow the instructions [here](https://pytorch.org/get-started/locally/) for the installation. We strongly recommend installing both PyTorch and TorchVision with CUDA support. + +## Steps to Install CoTracker and its dependencies: +``` +git clone https://github.com/facebookresearch/co-tracker +cd co-tracker +pip install -e . +pip install opencv-python einops timm matplotlib moviepy flow_vis +``` + + +## Model Weights Download: +``` +mkdir checkpoints +cd checkpoints +wget https://dl.fbaipublicfiles.com/cotracker/cotracker_stride_4_wind_8.pth +wget https://dl.fbaipublicfiles.com/cotracker/cotracker_stride_4_wind_12.pth +wget https://dl.fbaipublicfiles.com/cotracker/cotracker_stride_8_wind_16.pth +cd .. +``` + + +## Running the Demo: +Try our [Colab demo](https://github.com/facebookresearch/co-tracker/notebooks/demo.ipynb) or run a local demo with 10*10 points sampled on a grid on the first frame of a video: +``` +python demo.py --grid_size 10 +``` + +## Evaluation +To reproduce the results presented in the paper, download the following datasets: +- [TAP-Vid](https://github.com/deepmind/tapnet) +- [BADJA](https://github.com/benjiebob/BADJA) +- [ZJU-Mocap (FastCapture)](https://arxiv.org/abs/2303.11898) + +And install the necessary dependencies: +``` +pip install hydra-core==1.1.0 mediapy tensorboard +``` +Then, execute the following command to evaluate on BADJA: +``` +python ./cotracker/evaluation/evaluate.py --config-name eval_badja exp_dir=./eval_outputs dataset_root=your/badja/path +``` + +## Training +To train the CoTracker as described in our paper, you first need to generate annotations for [Google Kubric](https://github.com/google-research/kubric) MOVI-f dataset. Instructions for annotation generation can be found [here](https://github.com/deepmind/tapnet). + +Once you have the annotated dataset, you need to make sure you followed the steps for evaluation setup and install the training dependencies: +``` +pip install pytorch_lightning==1.6.0 +``` + launch training on Kubric. Our model was trained using 32 GPUs, and you can adjust the parameters to best suit your hardware setup. +``` +python train.py --batch_size 1 --num_workers 28 \ +--num_steps 50000 --ckpt_path ./ --model_name cotracker \ +--save_freq 200 --sequence_len 24 --eval_datasets tapvid_davis_first badja \ +--traj_per_sample 256 --sliding_window_len 8 --updateformer_space_depth 6 --updateformer_time_depth 6 \ +--save_every_n_epoch 10 --evaluate_every_n_epoch 10 --model_stride 4 +``` + +## License +The majority of CoTracker is licensed under CC-BY-NC, however portions of the project are available under separate license terms: Particle Video Revisited is licensed under the MIT license, TAP-Vid is licensed under the Apache 2.0 license. 
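
## Calling the Model from Python

The `demo.py` command above roughly corresponds to the Python sketch below. The `CoTrackerPredictor` class and its call signature are assumptions inferred from `demo.py` and the checkpoint names listed earlier (the predictor module is not reproduced in this excerpt), so treat those names as illustrative; the tensor layouts follow the `CoTrackerData` fields used by the dataset classes in this commit.

```python
import torch
from torchvision.io import read_video

# assets/apple.mp4 is one of the binary files added in this commit.
frames, _, _ = read_video("./assets/apple.mp4", pts_unit="sec")  # (T, H, W, 3) uint8
video = frames.permute(0, 3, 1, 2)[None].float()                 # (1, T, 3, H, W)

# Hypothetical predictor API -- see demo.py for the actual entry point.
from cotracker.predictor import CoTrackerPredictor

model = CoTrackerPredictor(checkpoint="./checkpoints/cotracker_stride_4_wind_8.pth")
if torch.cuda.is_available():
    model, video = model.cuda(), video.cuda()

# Track a 10x10 grid of points sampled on the first frame.
pred_tracks, pred_visibility = model(video, grid_size=10)
# pred_tracks:     (1, T, N, 2) x/y coordinates of each tracked point per frame
# pred_visibility: (1, T, N)    per-frame visibility of each point
```
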
+ +## Citing CoTracker +If you find our repository useful, please consider giving it a star ⭐ and citing our paper in your work: +``` +@article{karaev2023cotracker, + title={CoTracker: It is Better to Track Together}, + author={Nikita Karaev and Ignacio Rocco and Benjamin Graham and Natalia Neverova and Andrea Vedaldi and Christian Rupprecht}, + journal={arxiv}, + year={2023} +} +``` \ No newline at end of file diff --git a/assets/apple.mp4 b/assets/apple.mp4 new file mode 100644 index 0000000..4721891 Binary files /dev/null and b/assets/apple.mp4 differ diff --git a/assets/apple_mask.png b/assets/apple_mask.png new file mode 100644 index 0000000..0c8391d Binary files /dev/null and b/assets/apple_mask.png differ diff --git a/assets/bmx-bumps.gif b/assets/bmx-bumps.gif new file mode 100644 index 0000000..6dfd301 Binary files /dev/null and b/assets/bmx-bumps.gif differ diff --git a/cotracker/__init__.py b/cotracker/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/cotracker/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/cotracker/datasets/__init__.py b/cotracker/datasets/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/cotracker/datasets/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/cotracker/datasets/badja_dataset.py b/cotracker/datasets/badja_dataset.py new file mode 100644 index 0000000..e1cdc90 --- /dev/null +++ b/cotracker/datasets/badja_dataset.py @@ -0,0 +1,390 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
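
# BADJA evaluation data: DAVIS-style frame sequences annotated with sparse animal
# joint positions and visibility flags. The classes below parse the per-video JSON
# joint annotations, pair them with the corresponding JPEG frames and segmentation
# masks, and pack each video into a CoTrackerData sample for evaluation.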
+ +import torch +import numpy as np +import os + +import json +import imageio +import cv2 + +from enum import Enum + +from cotracker.datasets.utils import CoTrackerData, resize_sample + +IGNORE_ANIMALS = [ + # "bear.json", + # "camel.json", + "cat_jump.json" + # "cows.json", + # "dog.json", + # "dog-agility.json", + # "horsejump-high.json", + # "horsejump-low.json", + # "impala0.json", + # "rs_dog.json" + "tiger.json" +] + + +class SMALJointCatalog(Enum): + # body_0 = 0 + # body_1 = 1 + # body_2 = 2 + # body_3 = 3 + # body_4 = 4 + # body_5 = 5 + # body_6 = 6 + # upper_right_0 = 7 + upper_right_1 = 8 + upper_right_2 = 9 + upper_right_3 = 10 + # upper_left_0 = 11 + upper_left_1 = 12 + upper_left_2 = 13 + upper_left_3 = 14 + neck_lower = 15 + # neck_upper = 16 + # lower_right_0 = 17 + lower_right_1 = 18 + lower_right_2 = 19 + lower_right_3 = 20 + # lower_left_0 = 21 + lower_left_1 = 22 + lower_left_2 = 23 + lower_left_3 = 24 + tail_0 = 25 + # tail_1 = 26 + # tail_2 = 27 + tail_3 = 28 + # tail_4 = 29 + # tail_5 = 30 + tail_6 = 31 + jaw = 32 + nose = 33 # ADDED JOINT FOR VERTEX 1863 + # chin = 34 # ADDED JOINT FOR VERTEX 26 + right_ear = 35 # ADDED JOINT FOR VERTEX 149 + left_ear = 36 # ADDED JOINT FOR VERTEX 2124 + + +class SMALJointInfo: + def __init__(self): + # These are the + self.annotated_classes = np.array( + [ + 8, + 9, + 10, # upper_right + 12, + 13, + 14, # upper_left + 15, # neck + 18, + 19, + 20, # lower_right + 22, + 23, + 24, # lower_left + 25, + 28, + 31, # tail + 32, + 33, # head + 35, # right_ear + 36, + ] + ) # left_ear + + self.annotated_markers = np.array( + [ + cv2.MARKER_CROSS, + cv2.MARKER_STAR, + cv2.MARKER_TRIANGLE_DOWN, + cv2.MARKER_CROSS, + cv2.MARKER_STAR, + cv2.MARKER_TRIANGLE_DOWN, + cv2.MARKER_CROSS, + cv2.MARKER_CROSS, + cv2.MARKER_STAR, + cv2.MARKER_TRIANGLE_DOWN, + cv2.MARKER_CROSS, + cv2.MARKER_STAR, + cv2.MARKER_TRIANGLE_DOWN, + cv2.MARKER_CROSS, + cv2.MARKER_STAR, + cv2.MARKER_TRIANGLE_DOWN, + cv2.MARKER_CROSS, + cv2.MARKER_STAR, + cv2.MARKER_CROSS, + cv2.MARKER_CROSS, + ] + ) + + self.joint_regions = np.array( + [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 3, + 3, + 4, + 4, + 4, + 4, + 5, + 5, + 5, + 5, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 7, + 7, + 7, + 8, + 9, + ] + ) + + self.annotated_joint_region = self.joint_regions[self.annotated_classes] + self.region_colors = np.array( + [ + [250, 190, 190], # body, light pink + [60, 180, 75], # upper_right, green + [230, 25, 75], # upper_left, red + [128, 0, 0], # neck, maroon + [0, 130, 200], # lower_right, blue + [255, 255, 25], # lower_left, yellow + [240, 50, 230], # tail, majenta + [245, 130, 48], # jaw / nose / chin, orange + [29, 98, 115], # right_ear, turquoise + [255, 153, 204], + ] + ) # left_ear, pink + + self.joint_colors = np.array(self.region_colors)[self.annotated_joint_region] + + +class BADJAData: + def __init__(self, data_root, complete=False): + annotations_path = os.path.join(data_root, "joint_annotations") + + self.animal_dict = {} + self.animal_count = 0 + self.smal_joint_info = SMALJointInfo() + for __, animal_json in enumerate(sorted(os.listdir(annotations_path))): + if animal_json not in IGNORE_ANIMALS: + json_path = os.path.join(annotations_path, animal_json) + with open(json_path) as json_data: + animal_joint_data = json.load(json_data) + + filenames = [] + segnames = [] + joints = [] + visible = [] + + first_path = animal_joint_data[0]["segmentation_path"] + last_path = animal_joint_data[-1]["segmentation_path"] + first_frame = 
first_path.split("/")[-1] + last_frame = last_path.split("/")[-1] + + if not "extra_videos" in first_path: + animal = first_path.split("/")[-2] + + first_frame_int = int(first_frame.split(".")[0]) + last_frame_int = int(last_frame.split(".")[0]) + + for fr in range(first_frame_int, last_frame_int + 1): + ref_file_name = os.path.join( + data_root, + "DAVIS/JPEGImages/Full-Resolution/%s/%05d.jpg" + % (animal, fr), + ) + ref_seg_name = os.path.join( + data_root, + "DAVIS/Annotations/Full-Resolution/%s/%05d.png" + % (animal, fr), + ) + + foundit = False + for ind, image_annotation in enumerate(animal_joint_data): + file_name = os.path.join( + data_root, image_annotation["image_path"] + ) + seg_name = os.path.join( + data_root, image_annotation["segmentation_path"] + ) + + if file_name == ref_file_name: + foundit = True + label_ind = ind + + if foundit: + image_annotation = animal_joint_data[label_ind] + file_name = os.path.join( + data_root, image_annotation["image_path"] + ) + seg_name = os.path.join( + data_root, image_annotation["segmentation_path"] + ) + joint = np.array(image_annotation["joints"]) + vis = np.array(image_annotation["visibility"]) + else: + file_name = ref_file_name + seg_name = ref_seg_name + joint = None + vis = None + + filenames.append(file_name) + segnames.append(seg_name) + joints.append(joint) + visible.append(vis) + + if len(filenames): + self.animal_dict[self.animal_count] = ( + filenames, + segnames, + joints, + visible, + ) + self.animal_count += 1 + print("Loaded BADJA dataset") + + def get_loader(self): + for __ in range(int(1e6)): + animal_id = np.random.choice(len(self.animal_dict.keys())) + filenames, segnames, joints, visible = self.animal_dict[animal_id] + + image_id = np.random.randint(0, len(filenames)) + + seg_file = segnames[image_id] + image_file = filenames[image_id] + + joints = joints[image_id].copy() + joints = joints[self.smal_joint_info.annotated_classes] + visible = visible[image_id][self.smal_joint_info.annotated_classes] + + rgb_img = imageio.imread(image_file) # , mode='RGB') + sil_img = imageio.imread(seg_file) # , mode='RGB') + + rgb_h, rgb_w, _ = rgb_img.shape + sil_img = cv2.resize(sil_img, (rgb_w, rgb_h), cv2.INTER_NEAREST) + + yield rgb_img, sil_img, joints, visible, image_file + + def get_video(self, animal_id): + filenames, segnames, joint, visible = self.animal_dict[animal_id] + + rgbs = [] + segs = [] + joints = [] + visibles = [] + + for s in range(len(filenames)): + image_file = filenames[s] + rgb_img = imageio.imread(image_file) # , mode='RGB') + rgb_h, rgb_w, _ = rgb_img.shape + + seg_file = segnames[s] + sil_img = imageio.imread(seg_file) # , mode='RGB') + sil_img = cv2.resize(sil_img, (rgb_w, rgb_h), cv2.INTER_NEAREST) + + jo = joint[s] + + if jo is not None: + joi = joint[s].copy() + joi = joi[self.smal_joint_info.annotated_classes] + vis = visible[s][self.smal_joint_info.annotated_classes] + else: + joi = None + vis = None + + rgbs.append(rgb_img) + segs.append(sil_img) + joints.append(joi) + visibles.append(vis) + + return rgbs, segs, joints, visibles, filenames[0] + + +class BadjaDataset(torch.utils.data.Dataset): + def __init__( + self, data_root, max_seq_len=1000, dataset_resolution=(384, 512) + ): + + self.data_root = data_root + self.badja_data = BADJAData(data_root) + self.max_seq_len = max_seq_len + self.dataset_resolution = dataset_resolution + print( + "found %d unique videos in %s" + % (self.badja_data.animal_count, self.data_root) + ) + + def __getitem__(self, index): + + rgbs, segs, joints, visibles, filename = 
self.badja_data.get_video(index) + S = len(rgbs) + H, W, __ = rgbs[0].shape + H, W, __ = segs[0].shape + + N, __ = joints[0].shape + + # let's eliminate the Nones + # note the first one is guaranteed present + for s in range(1, S): + if joints[s] is None: + joints[s] = np.zeros_like(joints[0]) + visibles[s] = np.zeros_like(visibles[0]) + + # eliminate the mystery dim + segs = [seg[:, :, 0] for seg in segs] + + rgbs = np.stack(rgbs, 0) + segs = np.stack(segs, 0) + trajs = np.stack(joints, 0) + visibles = np.stack(visibles, 0) + + rgbs = torch.from_numpy(rgbs).reshape(S, H, W, 3).permute(0, 3, 1, 2).float() + segs = torch.from_numpy(segs).reshape(S, 1, H, W).float() + trajs = torch.from_numpy(trajs).reshape(S, N, 2).float() + visibles = torch.from_numpy(visibles).reshape(S, N) + + rgbs = rgbs[: self.max_seq_len] + segs = segs[: self.max_seq_len] + trajs = trajs[: self.max_seq_len] + visibles = visibles[: self.max_seq_len] + # apparently the coords are in yx order + trajs = torch.flip(trajs, [2]) + + if "extra_videos" in filename: + seq_name = filename.split("/")[-3] + else: + seq_name = filename.split("/")[-2] + + rgbs, trajs, segs = resize_sample(rgbs, trajs, segs, self.dataset_resolution) + + return CoTrackerData(rgbs, segs, trajs, visibles, seq_name=seq_name) + + def __len__(self): + return self.badja_data.animal_count diff --git a/cotracker/datasets/fast_capture_dataset.py b/cotracker/datasets/fast_capture_dataset.py new file mode 100644 index 0000000..86a7850 --- /dev/null +++ b/cotracker/datasets/fast_capture_dataset.py @@ -0,0 +1,72 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import os +import torch + +# from PIL import Image +import imageio +import numpy as np +from cotracker.datasets.utils import CoTrackerData, resize_sample + + +class FastCaptureDataset(torch.utils.data.Dataset): + def __init__( + self, + data_root, + max_seq_len=50, + max_num_points=20, + dataset_resolution=(384, 512), + ): + + self.data_root = data_root + self.seq_names = os.listdir(os.path.join(data_root, "renders_local_rm")) + self.pth_dir = os.path.join(data_root, "zju_tracking") + self.max_seq_len = max_seq_len + self.max_num_points = max_num_points + self.dataset_resolution = dataset_resolution + print("found %d unique videos in %s" % (len(self.seq_names), self.data_root)) + + def __getitem__(self, index): + seq_name = self.seq_names[index] + spath = os.path.join(self.data_root, "renders_local_rm", seq_name) + pthpath = os.path.join(self.pth_dir, seq_name + ".pth") + + rgbs = [] + img_paths = sorted(os.listdir(spath)) + for i, img_path in enumerate(img_paths): + if i < self.max_seq_len: + rgbs.append(imageio.imread(os.path.join(spath, img_path))) + + annot_dict = torch.load(pthpath) + traj_2d = annot_dict["traj_2d"][:, :, : self.max_seq_len] + visibility = annot_dict["visibility"][:, : self.max_seq_len] + + S = len(rgbs) + H, W, __ = rgbs[0].shape + *_, S = traj_2d.shape + visibile_pts_first_frame_inds = (visibility[:, 0] > 0).nonzero(as_tuple=False)[ + :, 0 + ] + torch.manual_seed(0) + point_inds = torch.randperm(len(visibile_pts_first_frame_inds))[ + : self.max_num_points + ] + visible_inds_sampled = visibile_pts_first_frame_inds[point_inds] + + rgbs = np.stack(rgbs, 0) + rgbs = torch.from_numpy(rgbs).reshape(S, H, W, 3).permute(0, 3, 1, 2).float() + + segs = torch.ones(S, 1, H, W).float() + trajs = traj_2d[visible_inds_sampled].permute(2, 0, 
1).float() + visibles = visibility[visible_inds_sampled].permute(1, 0) + + rgbs, trajs, segs = resize_sample(rgbs, trajs, segs, self.dataset_resolution) + + return CoTrackerData(rgbs, segs, trajs, visibles, seq_name=seq_name) + + def __len__(self): + return len(self.seq_names) diff --git a/cotracker/datasets/kubric_movif_dataset.py b/cotracker/datasets/kubric_movif_dataset.py new file mode 100644 index 0000000..c58b4e7 --- /dev/null +++ b/cotracker/datasets/kubric_movif_dataset.py @@ -0,0 +1,494 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import os +import torch + +import imageio +import numpy as np + +from cotracker.datasets.utils import CoTrackerData +from torchvision.transforms import ColorJitter, GaussianBlur +from PIL import Image +import cv2 + + +class CoTrackerDataset(torch.utils.data.Dataset): + def __init__( + self, + data_root, + crop_size=(384, 512), + seq_len=24, + traj_per_sample=768, + sample_vis_1st_frame=False, + use_augs=False, + ): + super(CoTrackerDataset, self).__init__() + np.random.seed(0) + torch.manual_seed(0) + self.data_root = data_root + self.seq_len = seq_len + self.traj_per_sample = traj_per_sample + self.sample_vis_1st_frame = sample_vis_1st_frame + self.use_augs = use_augs + self.crop_size = crop_size + + # photometric augmentation + self.photo_aug = ColorJitter( + brightness=0.2, contrast=0.2, saturation=0.2, hue=0.25 / 3.14 + ) + self.blur_aug = GaussianBlur(11, sigma=(0.1, 2.0)) + + self.blur_aug_prob = 0.25 + self.color_aug_prob = 0.25 + + # occlusion augmentation + self.eraser_aug_prob = 0.5 + self.eraser_bounds = [2, 100] + self.eraser_max = 10 + + # occlusion augmentation + self.replace_aug_prob = 0.5 + self.replace_bounds = [2, 100] + self.replace_max = 10 + + # spatial augmentations + self.pad_bounds = [0, 100] + self.crop_size = crop_size + self.resize_lim = [0.25, 2.0] # sample resizes from here + self.resize_delta = 0.2 + self.max_crop_offset = 50 + + self.do_flip = True + self.h_flip_prob = 0.5 + self.v_flip_prob = 0.5 + + def getitem_helper(self, index): + return NotImplementedError + + def __getitem__(self, index): + gotit = False + + sample, gotit = self.getitem_helper(index) + if not gotit: + print("warning: sampling failed") + # fake sample, so we can still collate + sample = CoTrackerData( + video=torch.zeros( + (self.seq_len, 3, self.crop_size[0], self.crop_size[1]) + ), + segmentation=torch.zeros( + (self.seq_len, 1, self.crop_size[0], self.crop_size[1]) + ), + trajectory=torch.zeros((self.seq_len, self.traj_per_sample, 2)), + visibility=torch.zeros((self.seq_len, self.traj_per_sample)), + valid=torch.zeros((self.seq_len, self.traj_per_sample)), + ) + + return sample, gotit + + def add_photometric_augs(self, rgbs, trajs, visibles, eraser=True, replace=True): + T, N, _ = trajs.shape + + S = len(rgbs) + H, W = rgbs[0].shape[:2] + assert S == T + + if eraser: + ############ eraser transform (per image after the first) ############ + rgbs = [rgb.astype(np.float32) for rgb in rgbs] + for i in range(1, S): + if np.random.rand() < self.eraser_aug_prob: + for _ in range( + np.random.randint(1, self.eraser_max + 1) + ): # number of times to occlude + + xc = np.random.randint(0, W) + yc = np.random.randint(0, H) + dx = np.random.randint( + self.eraser_bounds[0], self.eraser_bounds[1] + ) + dy = np.random.randint( + self.eraser_bounds[0], self.eraser_bounds[1] + ) + x0 = np.clip(xc - dx / 
2, 0, W - 1).round().astype(np.int32) + x1 = np.clip(xc + dx / 2, 0, W - 1).round().astype(np.int32) + y0 = np.clip(yc - dy / 2, 0, H - 1).round().astype(np.int32) + y1 = np.clip(yc + dy / 2, 0, H - 1).round().astype(np.int32) + + mean_color = np.mean( + rgbs[i][y0:y1, x0:x1, :].reshape(-1, 3), axis=0 + ) + rgbs[i][y0:y1, x0:x1, :] = mean_color + + occ_inds = np.logical_and( + np.logical_and(trajs[i, :, 0] >= x0, trajs[i, :, 0] < x1), + np.logical_and(trajs[i, :, 1] >= y0, trajs[i, :, 1] < y1), + ) + visibles[i, occ_inds] = 0 + rgbs = [rgb.astype(np.uint8) for rgb in rgbs] + + if replace: + + rgbs_alt = [ + np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8) + for rgb in rgbs + ] + rgbs_alt = [ + np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8) + for rgb in rgbs_alt + ] + + ############ replace transform (per image after the first) ############ + rgbs = [rgb.astype(np.float32) for rgb in rgbs] + rgbs_alt = [rgb.astype(np.float32) for rgb in rgbs_alt] + for i in range(1, S): + if np.random.rand() < self.replace_aug_prob: + for _ in range( + np.random.randint(1, self.replace_max + 1) + ): # number of times to occlude + xc = np.random.randint(0, W) + yc = np.random.randint(0, H) + dx = np.random.randint( + self.replace_bounds[0], self.replace_bounds[1] + ) + dy = np.random.randint( + self.replace_bounds[0], self.replace_bounds[1] + ) + x0 = np.clip(xc - dx / 2, 0, W - 1).round().astype(np.int32) + x1 = np.clip(xc + dx / 2, 0, W - 1).round().astype(np.int32) + y0 = np.clip(yc - dy / 2, 0, H - 1).round().astype(np.int32) + y1 = np.clip(yc + dy / 2, 0, H - 1).round().astype(np.int32) + + wid = x1 - x0 + hei = y1 - y0 + y00 = np.random.randint(0, H - hei) + x00 = np.random.randint(0, W - wid) + fr = np.random.randint(0, S) + rep = rgbs_alt[fr][y00 : y00 + hei, x00 : x00 + wid, :] + rgbs[i][y0:y1, x0:x1, :] = rep + + occ_inds = np.logical_and( + np.logical_and(trajs[i, :, 0] >= x0, trajs[i, :, 0] < x1), + np.logical_and(trajs[i, :, 1] >= y0, trajs[i, :, 1] < y1), + ) + visibles[i, occ_inds] = 0 + rgbs = [rgb.astype(np.uint8) for rgb in rgbs] + + ############ photometric augmentation ############ + if np.random.rand() < self.color_aug_prob: + # random per-frame amount of aug + rgbs = [ + np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8) + for rgb in rgbs + ] + + if np.random.rand() < self.blur_aug_prob: + # random per-frame amount of blur + rgbs = [ + np.array(self.blur_aug(Image.fromarray(rgb)), dtype=np.uint8) + for rgb in rgbs + ] + + return rgbs, trajs, visibles + + def add_spatial_augs(self, rgbs, trajs, visibles): + T, N, __ = trajs.shape + + S = len(rgbs) + H, W = rgbs[0].shape[:2] + assert S == T + + rgbs = [rgb.astype(np.float32) for rgb in rgbs] + + ############ spatial transform ############ + + # padding + pad_x0 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) + pad_x1 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) + pad_y0 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) + pad_y1 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) + + rgbs = [ + np.pad(rgb, ((pad_y0, pad_y1), (pad_x0, pad_x1), (0, 0))) for rgb in rgbs + ] + trajs[:, :, 0] += pad_x0 + trajs[:, :, 1] += pad_y0 + H, W = rgbs[0].shape[:2] + + # scaling + stretching + scale = np.random.uniform(self.resize_lim[0], self.resize_lim[1]) + scale_x = scale + scale_y = scale + H_new = H + W_new = W + + scale_delta_x = 0.0 + scale_delta_y = 0.0 + + rgbs_scaled = [] + for s in range(S): + if s == 1: + scale_delta_x = 
np.random.uniform(-self.resize_delta, self.resize_delta) + scale_delta_y = np.random.uniform(-self.resize_delta, self.resize_delta) + elif s > 1: + scale_delta_x = ( + scale_delta_x * 0.8 + + np.random.uniform(-self.resize_delta, self.resize_delta) * 0.2 + ) + scale_delta_y = ( + scale_delta_y * 0.8 + + np.random.uniform(-self.resize_delta, self.resize_delta) * 0.2 + ) + scale_x = scale_x + scale_delta_x + scale_y = scale_y + scale_delta_y + + # bring h/w closer + scale_xy = (scale_x + scale_y) * 0.5 + scale_x = scale_x * 0.5 + scale_xy * 0.5 + scale_y = scale_y * 0.5 + scale_xy * 0.5 + + # don't get too crazy + scale_x = np.clip(scale_x, 0.2, 2.0) + scale_y = np.clip(scale_y, 0.2, 2.0) + + H_new = int(H * scale_y) + W_new = int(W * scale_x) + + # make it at least slightly bigger than the crop area, + # so that the random cropping can add diversity + H_new = np.clip(H_new, self.crop_size[0] + 10, None) + W_new = np.clip(W_new, self.crop_size[1] + 10, None) + # recompute scale in case we clipped + scale_x = W_new / float(W) + scale_y = H_new / float(H) + + rgbs_scaled.append( + cv2.resize(rgbs[s], (W_new, H_new), interpolation=cv2.INTER_LINEAR) + ) + trajs[s, :, 0] *= scale_x + trajs[s, :, 1] *= scale_y + rgbs = rgbs_scaled + + ok_inds = visibles[0, :] > 0 + vis_trajs = trajs[:, ok_inds] # S,?,2 + + if vis_trajs.shape[1] > 0: + mid_x = np.mean(vis_trajs[0, :, 0]) + mid_y = np.mean(vis_trajs[0, :, 1]) + else: + mid_y = self.crop_size[0] + mid_x = self.crop_size[1] + + x0 = int(mid_x - self.crop_size[1] // 2) + y0 = int(mid_y - self.crop_size[0] // 2) + + offset_x = 0 + offset_y = 0 + + for s in range(S): + # on each frame, shift a bit more + if s == 1: + offset_x = np.random.randint( + -self.max_crop_offset, self.max_crop_offset + ) + offset_y = np.random.randint( + -self.max_crop_offset, self.max_crop_offset + ) + elif s > 1: + offset_x = int( + offset_x * 0.8 + + np.random.randint(-self.max_crop_offset, self.max_crop_offset + 1) + * 0.2 + ) + offset_y = int( + offset_y * 0.8 + + np.random.randint(-self.max_crop_offset, self.max_crop_offset + 1) + * 0.2 + ) + x0 = x0 + offset_x + y0 = y0 + offset_y + + H_new, W_new = rgbs[s].shape[:2] + if H_new == self.crop_size[0]: + y0 = 0 + else: + y0 = min(max(0, y0), H_new - self.crop_size[0] - 1) + + if W_new == self.crop_size[1]: + x0 = 0 + else: + x0 = min(max(0, x0), W_new - self.crop_size[1] - 1) + + rgbs[s] = rgbs[s][y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]] + trajs[s, :, 0] -= x0 + trajs[s, :, 1] -= y0 + + H_new = self.crop_size[0] + W_new = self.crop_size[1] + + # flip + h_flipped = False + v_flipped = False + if self.do_flip: + # h flip + if np.random.rand() < self.h_flip_prob: + h_flipped = True + rgbs = [rgb[:, ::-1] for rgb in rgbs] + # v flip + if np.random.rand() < self.v_flip_prob: + v_flipped = True + rgbs = [rgb[::-1] for rgb in rgbs] + if h_flipped: + trajs[:, :, 0] = W_new - trajs[:, :, 0] + if v_flipped: + trajs[:, :, 1] = H_new - trajs[:, :, 1] + + return rgbs, trajs + + def crop(self, rgbs, trajs): + T, N, _ = trajs.shape + + S = len(rgbs) + H, W = rgbs[0].shape[:2] + assert S == T + + ############ spatial transform ############ + + H_new = H + W_new = W + + # simple random crop + y0 = ( + 0 + if self.crop_size[0] >= H_new + else np.random.randint(0, H_new - self.crop_size[0]) + ) + x0 = ( + 0 + if self.crop_size[1] >= W_new + else np.random.randint(0, W_new - self.crop_size[1]) + ) + rgbs = [ + rgb[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]] + for rgb in rgbs + ] + + trajs[:, :, 0] -= x0 + trajs[:, 
:, 1] -= y0 + + return rgbs, trajs + + +class KubricMovifDataset(CoTrackerDataset): + def __init__( + self, + data_root, + crop_size=(384, 512), + seq_len=24, + traj_per_sample=768, + sample_vis_1st_frame=False, + use_augs=False, + ): + super(KubricMovifDataset, self).__init__( + data_root=data_root, + crop_size=crop_size, + seq_len=seq_len, + traj_per_sample=traj_per_sample, + sample_vis_1st_frame=sample_vis_1st_frame, + use_augs=use_augs, + ) + + self.pad_bounds = [0, 25] + self.resize_lim = [0.75, 1.25] # sample resizes from here + self.resize_delta = 0.05 + self.max_crop_offset = 15 + self.seq_names = [ + fname + for fname in os.listdir(data_root) + if os.path.isdir(os.path.join(data_root, fname)) + ] + print("found %d unique videos in %s" % (len(self.seq_names), self.data_root)) + + def getitem_helper(self, index): + gotit = True + seq_name = self.seq_names[index] + + npy_path = os.path.join(self.data_root, seq_name, seq_name + ".npy") + rgb_path = os.path.join(self.data_root, seq_name, "frames") + + img_paths = sorted(os.listdir(rgb_path)) + rgbs = [] + for i, img_path in enumerate(img_paths): + rgbs.append(imageio.v2.imread(os.path.join(rgb_path, img_path))) + + rgbs = np.stack(rgbs) + annot_dict = np.load(npy_path, allow_pickle=True).item() + traj_2d = annot_dict["coords"] + visibility = annot_dict["visibility"] + + # random crop + assert self.seq_len <= len(rgbs) + if self.seq_len < len(rgbs): + start_ind = np.random.choice(len(rgbs) - self.seq_len, 1)[0] + + rgbs = rgbs[start_ind : start_ind + self.seq_len] + traj_2d = traj_2d[:, start_ind : start_ind + self.seq_len] + visibility = visibility[:, start_ind : start_ind + self.seq_len] + + traj_2d = np.transpose(traj_2d, (1, 0, 2)) + visibility = np.transpose(np.logical_not(visibility), (1, 0)) + if self.use_augs: + rgbs, traj_2d, visibility = self.add_photometric_augs( + rgbs, traj_2d, visibility + ) + rgbs, traj_2d = self.add_spatial_augs(rgbs, traj_2d, visibility) + else: + rgbs, traj_2d = self.crop(rgbs, traj_2d) + + visibility[traj_2d[:, :, 0] > self.crop_size[1] - 1] = False + visibility[traj_2d[:, :, 0] < 0] = False + visibility[traj_2d[:, :, 1] > self.crop_size[0] - 1] = False + visibility[traj_2d[:, :, 1] < 0] = False + + visibility = torch.from_numpy(visibility) + traj_2d = torch.from_numpy(traj_2d) + + visibile_pts_first_frame_inds = (visibility[0]).nonzero(as_tuple=False)[:, 0] + + if self.sample_vis_1st_frame: + visibile_pts_inds = visibile_pts_first_frame_inds + else: + visibile_pts_mid_frame_inds = (visibility[self.seq_len // 2]).nonzero( + as_tuple=False + )[:, 0] + visibile_pts_inds = torch.cat( + (visibile_pts_first_frame_inds, visibile_pts_mid_frame_inds), dim=0 + ) + point_inds = torch.randperm(len(visibile_pts_inds))[: self.traj_per_sample] + if len(point_inds) < self.traj_per_sample: + gotit = False + + visible_inds_sampled = visibile_pts_inds[point_inds] + + trajs = traj_2d[:, visible_inds_sampled].float() + visibles = visibility[:, visible_inds_sampled] + valids = torch.ones((self.seq_len, self.traj_per_sample)) + + rgbs = torch.from_numpy(np.stack(rgbs)).permute(0, 3, 1, 2).float() + segs = torch.ones((self.seq_len, 1, self.crop_size[0], self.crop_size[1])) + sample = CoTrackerData( + video=rgbs, + segmentation=segs, + trajectory=trajs, + visibility=visibles, + valid=valids, + seq_name=seq_name, + ) + return sample, gotit + + def __len__(self): + return len(self.seq_names) diff --git a/cotracker/datasets/tap_vid_datasets.py b/cotracker/datasets/tap_vid_datasets.py new file mode 100644 index 0000000..9197a26 
--- /dev/null +++ b/cotracker/datasets/tap_vid_datasets.py @@ -0,0 +1,218 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import os +import io +import glob +import torch +import pickle +import numpy as np +import mediapy as media + +from PIL import Image +from typing import Mapping, Tuple, Union + +from cotracker.datasets.utils import CoTrackerData + +DatasetElement = Mapping[str, Mapping[str, Union[np.ndarray, str]]] + + +def resize_video(video: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: + """Resize a video to output_size.""" + # If you have a GPU, consider replacing this with a GPU-enabled resize op, + # such as a jitted jax.image.resize. It will make things faster. + return media.resize_video(video, output_size) + + +def sample_queries_first( + target_occluded: np.ndarray, + target_points: np.ndarray, + frames: np.ndarray, +) -> Mapping[str, np.ndarray]: + """Package a set of frames and tracks for use in TAPNet evaluations. + Given a set of frames and tracks with no query points, use the first + visible point in each track as the query. + Args: + target_occluded: Boolean occlusion flag, of shape [n_tracks, n_frames], + where True indicates occluded. + target_points: Position, of shape [n_tracks, n_frames, 2], where each point + is [x,y] scaled between 0 and 1. + frames: Video tensor, of shape [n_frames, height, width, 3]. Scaled between + -1 and 1. + Returns: + A dict with the keys: + video: Video tensor of shape [1, n_frames, height, width, 3] + query_points: Query points of shape [1, n_queries, 3] where + each point is [t, y, x] scaled to the range [-1, 1] + target_points: Target points of shape [1, n_queries, n_frames, 2] where + each point is [x, y] scaled to the range [-1, 1] + """ + valid = np.sum(~target_occluded, axis=1) > 0 + target_points = target_points[valid, :] + target_occluded = target_occluded[valid, :] + + query_points = [] + for i in range(target_points.shape[0]): + index = np.where(target_occluded[i] == 0)[0][0] + x, y = target_points[i, index, 0], target_points[i, index, 1] + query_points.append(np.array([index, y, x])) # [t, y, x] + query_points = np.stack(query_points, axis=0) + + return { + "video": frames[np.newaxis, ...], + "query_points": query_points[np.newaxis, ...], + "target_points": target_points[np.newaxis, ...], + "occluded": target_occluded[np.newaxis, ...], + } + + +def sample_queries_strided( + target_occluded: np.ndarray, + target_points: np.ndarray, + frames: np.ndarray, + query_stride: int = 5, +) -> Mapping[str, np.ndarray]: + """Package a set of frames and tracks for use in TAPNet evaluations. + + Given a set of frames and tracks with no query points, sample queries + strided every query_stride frames, ignoring points that are not visible + at the selected frames. + + Args: + target_occluded: Boolean occlusion flag, of shape [n_tracks, n_frames], + where True indicates occluded. + target_points: Position, of shape [n_tracks, n_frames, 2], where each point + is [x,y] scaled between 0 and 1. + frames: Video tensor, of shape [n_frames, height, width, 3]. Scaled between + -1 and 1. + query_stride: When sampling query points, search for un-occluded points + every query_stride frames and convert each one into a query. + + Returns: + A dict with the keys: + video: Video tensor of shape [1, n_frames, height, width, 3]. The video + has floats scaled to the range [-1, 1]. 
+ query_points: Query points of shape [1, n_queries, 3] where + each point is [t, y, x] scaled to the range [-1, 1]. + target_points: Target points of shape [1, n_queries, n_frames, 2] where + each point is [x, y] scaled to the range [-1, 1]. + trackgroup: Index of the original track that each query point was + sampled from. This is useful for visualization. + """ + tracks = [] + occs = [] + queries = [] + trackgroups = [] + total = 0 + trackgroup = np.arange(target_occluded.shape[0]) + for i in range(0, target_occluded.shape[1], query_stride): + mask = target_occluded[:, i] == 0 + query = np.stack( + [ + i * np.ones(target_occluded.shape[0:1]), + target_points[:, i, 1], + target_points[:, i, 0], + ], + axis=-1, + ) + queries.append(query[mask]) + tracks.append(target_points[mask]) + occs.append(target_occluded[mask]) + trackgroups.append(trackgroup[mask]) + total += np.array(np.sum(target_occluded[:, i] == 0)) + + return { + "video": frames[np.newaxis, ...], + "query_points": np.concatenate(queries, axis=0)[np.newaxis, ...], + "target_points": np.concatenate(tracks, axis=0)[np.newaxis, ...], + "occluded": np.concatenate(occs, axis=0)[np.newaxis, ...], + "trackgroup": np.concatenate(trackgroups, axis=0)[np.newaxis, ...], + } + + +class TapVidDataset(torch.utils.data.Dataset): + def __init__( + self, + data_root, + dataset_type="davis", + resize_to_256=True, + queried_first=True, + ): + self.dataset_type = dataset_type + self.resize_to_256 = resize_to_256 + self.queried_first = queried_first + if self.dataset_type == "kinetics": + all_paths = glob.glob(os.path.join(data_root, "*_of_0010.pkl")) + points_dataset = [] + for pickle_path in all_paths: + with open(pickle_path, "rb") as f: + data = pickle.load(f) + points_dataset = points_dataset + data + self.points_dataset = points_dataset + else: + with open(data_root, "rb") as f: + self.points_dataset = pickle.load(f) + if self.dataset_type == "davis": + self.video_names = list(self.points_dataset.keys()) + print("found %d unique videos in %s" % (len(self.points_dataset), data_root)) + + def __getitem__(self, index): + if self.dataset_type == "davis": + video_name = self.video_names[index] + else: + video_name = index + video = self.points_dataset[video_name] + frames = video["video"] + + if isinstance(frames[0], bytes): + # TAP-Vid is stored and JPEG bytes rather than `np.ndarray`s. 
+ def decode(frame): + byteio = io.BytesIO(frame) + img = Image.open(byteio) + return np.array(img) + + frames = np.array([decode(frame) for frame in frames]) + + target_points = self.points_dataset[video_name]["points"] + if self.resize_to_256: + frames = resize_video(frames, [256, 256]) + target_points *= np.array([256, 256]) + else: + target_points *= np.array([frames.shape[2], frames.shape[1]]) + + T, H, W, C = frames.shape + N, T, D = target_points.shape + + target_occ = self.points_dataset[video_name]["occluded"] + if self.queried_first: + converted = sample_queries_first(target_occ, target_points, frames) + else: + converted = sample_queries_strided(target_occ, target_points, frames) + assert converted["target_points"].shape[1] == converted["query_points"].shape[1] + + trajs = ( + torch.from_numpy(converted["target_points"])[0].permute(1, 0, 2).float() + ) # T, N, D + + rgbs = torch.from_numpy(frames).permute(0, 3, 1, 2).float() + segs = torch.ones(T, 1, H, W).float() + visibles = torch.logical_not(torch.from_numpy(converted["occluded"]))[ + 0 + ].permute( + 1, 0 + ) # T, N + query_points = torch.from_numpy(converted["query_points"])[0] # T, N + return CoTrackerData( + rgbs, + segs, + trajs, + visibles, + seq_name=str(video_name), + query_points=query_points, + ) + + def __len__(self): + return len(self.points_dataset) diff --git a/cotracker/datasets/utils.py b/cotracker/datasets/utils.py new file mode 100644 index 0000000..d4057ba --- /dev/null +++ b/cotracker/datasets/utils.py @@ -0,0 +1,114 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +import torch +import dataclasses +import torch.nn.functional as F +from dataclasses import dataclass +from typing import Any, Optional + + +@dataclass(eq=False) +class CoTrackerData: + """ + Dataclass for storing video tracks data. + """ + + video: torch.Tensor # B, S, C, H, W + segmentation: torch.Tensor # B, S, 1, H, W + trajectory: torch.Tensor # B, S, N, 2 + visibility: torch.Tensor # B, S, N + # optional data + valid: Optional[torch.Tensor] = None # B, S, N + seq_name: Optional[str] = None + query_points: Optional[torch.Tensor] = None # TapVID evaluation format + + +def collate_fn(batch): + """ + Collate function for video tracks data. + """ + video = torch.stack([b.video for b in batch], dim=0) + segmentation = torch.stack([b.segmentation for b in batch], dim=0) + trajectory = torch.stack([b.trajectory for b in batch], dim=0) + visibility = torch.stack([b.visibility for b in batch], dim=0) + query_points = None + if batch[0].query_points is not None: + query_points = torch.stack([b.query_points for b in batch], dim=0) + seq_name = [b.seq_name for b in batch] + + return CoTrackerData( + video, + segmentation, + trajectory, + visibility, + seq_name=seq_name, + query_points=query_points, + ) + + +def collate_fn_train(batch): + """ + Collate function for video tracks data during training. 
+ """ + gotit = [gotit for _, gotit in batch] + video = torch.stack([b.video for b, _ in batch], dim=0) + segmentation = torch.stack([b.segmentation for b, _ in batch], dim=0) + trajectory = torch.stack([b.trajectory for b, _ in batch], dim=0) + visibility = torch.stack([b.visibility for b, _ in batch], dim=0) + valid = torch.stack([b.valid for b, _ in batch], dim=0) + seq_name = [b.seq_name for b, _ in batch] + return ( + CoTrackerData(video, segmentation, trajectory, visibility, valid, seq_name), + gotit, + ) + + +def try_to_cuda(t: Any) -> Any: + """ + Try to move the input variable `t` to a cuda device. + + Args: + t: Input. + + Returns: + t_cuda: `t` moved to a cuda device, if supported. + """ + try: + t = t.float().cuda() + except AttributeError: + pass + return t + + +def dataclass_to_cuda_(obj): + """ + Move all contents of a dataclass to cuda inplace if supported. + + Args: + batch: Input dataclass. + + Returns: + batch_cuda: `batch` moved to a cuda device, if supported. + """ + for f in dataclasses.fields(obj): + setattr(obj, f.name, try_to_cuda(getattr(obj, f.name))) + return obj + + +def resize_sample(rgbs, trajs_g, segs, interp_shape): + S, C, H, W = rgbs.shape + S, N, D = trajs_g.shape + + assert D == 2 + + rgbs = F.interpolate(rgbs, interp_shape, mode="bilinear") + segs = F.interpolate(segs, interp_shape, mode="nearest") + + trajs_g[:, :, 0] *= interp_shape[1] / W + trajs_g[:, :, 1] *= interp_shape[0] / H + return rgbs, trajs_g, segs diff --git a/cotracker/evaluation/__init__.py b/cotracker/evaluation/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/cotracker/evaluation/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
diff --git a/cotracker/evaluation/configs/eval_badja.yaml b/cotracker/evaluation/configs/eval_badja.yaml new file mode 100644 index 0000000..193f277 --- /dev/null +++ b/cotracker/evaluation/configs/eval_badja.yaml @@ -0,0 +1,6 @@ +defaults: + - default_config_eval +exp_dir: ./outputs/cotracker +dataset_name: badja + + \ No newline at end of file diff --git a/cotracker/evaluation/configs/eval_fastcapture.yaml b/cotracker/evaluation/configs/eval_fastcapture.yaml new file mode 100644 index 0000000..2046296 --- /dev/null +++ b/cotracker/evaluation/configs/eval_fastcapture.yaml @@ -0,0 +1,6 @@ +defaults: + - default_config_eval +exp_dir: ./outputs/cotracker +dataset_name: fastcapture + + \ No newline at end of file diff --git a/cotracker/evaluation/configs/eval_tapvid_davis_first.yaml b/cotracker/evaluation/configs/eval_tapvid_davis_first.yaml new file mode 100644 index 0000000..d37a6c9 --- /dev/null +++ b/cotracker/evaluation/configs/eval_tapvid_davis_first.yaml @@ -0,0 +1,6 @@ +defaults: + - default_config_eval +exp_dir: ./outputs/cotracker +dataset_name: tapvid_davis_first + + \ No newline at end of file diff --git a/cotracker/evaluation/configs/eval_tapvid_davis_strided.yaml b/cotracker/evaluation/configs/eval_tapvid_davis_strided.yaml new file mode 100644 index 0000000..6e3cf3c --- /dev/null +++ b/cotracker/evaluation/configs/eval_tapvid_davis_strided.yaml @@ -0,0 +1,6 @@ +defaults: + - default_config_eval +exp_dir: ./outputs/cotracker +dataset_name: tapvid_davis_strided + + \ No newline at end of file diff --git a/cotracker/evaluation/configs/eval_tapvid_kinetics_first.yaml b/cotracker/evaluation/configs/eval_tapvid_kinetics_first.yaml new file mode 100644 index 0000000..3be8914 --- /dev/null +++ b/cotracker/evaluation/configs/eval_tapvid_kinetics_first.yaml @@ -0,0 +1,6 @@ +defaults: + - default_config_eval +exp_dir: ./outputs/cotracker +dataset_name: tapvid_kinetics_first + + \ No newline at end of file diff --git a/cotracker/evaluation/core/__init__.py b/cotracker/evaluation/core/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/cotracker/evaluation/core/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/cotracker/evaluation/core/eval_utils.py b/cotracker/evaluation/core/eval_utils.py new file mode 100644 index 0000000..405aa8b --- /dev/null +++ b/cotracker/evaluation/core/eval_utils.py @@ -0,0 +1,144 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np + +from typing import Iterable, Mapping, Tuple, Union + + +def compute_tapvid_metrics( + query_points: np.ndarray, + gt_occluded: np.ndarray, + gt_tracks: np.ndarray, + pred_occluded: np.ndarray, + pred_tracks: np.ndarray, + query_mode: str, +) -> Mapping[str, np.ndarray]: + """Computes TAP-Vid metrics (Jaccard, Pts. Within Thresh, Occ. Acc.) + See the TAP-Vid paper for details on the metric computation. All inputs are + given in raster coordinates. The first three arguments should be the direct + outputs of the reader: the 'query_points', 'occluded', and 'target_points'. + The paper metrics assume these are scaled relative to 256x256 images. + pred_occluded and pred_tracks are your algorithm's predictions. 
+ This function takes a batch of inputs, and computes metrics separately for + each video. The metrics for the full benchmark are a simple mean of the + metrics across the full set of videos. These numbers are between 0 and 1, + but the paper multiplies them by 100 to ease reading. + Args: + query_points: The query points, an in the format [t, y, x]. Its size is + [b, n, 3], where b is the batch size and n is the number of queries + gt_occluded: A boolean array of shape [b, n, t], where t is the number + of frames. True indicates that the point is occluded. + gt_tracks: The target points, of shape [b, n, t, 2]. Each point is + in the format [x, y] + pred_occluded: A boolean array of predicted occlusions, in the same + format as gt_occluded. + pred_tracks: An array of track predictions from your algorithm, in the + same format as gt_tracks. + query_mode: Either 'first' or 'strided', depending on how queries are + sampled. If 'first', we assume the prior knowledge that all points + before the query point are occluded, and these are removed from the + evaluation. + Returns: + A dict with the following keys: + occlusion_accuracy: Accuracy at predicting occlusion. + pts_within_{x} for x in [1, 2, 4, 8, 16]: Fraction of points + predicted to be within the given pixel threshold, ignoring occlusion + prediction. + jaccard_{x} for x in [1, 2, 4, 8, 16]: Jaccard metric for the given + threshold + average_pts_within_thresh: average across pts_within_{x} + average_jaccard: average across jaccard_{x} + """ + + metrics = {} + + # Don't evaluate the query point. Numpy doesn't have one_hot, so we + # replicate it by indexing into an identity matrix. + one_hot_eye = np.eye(gt_tracks.shape[2]) + query_frame = query_points[..., 0] + query_frame = np.round(query_frame).astype(np.int32) + evaluation_points = one_hot_eye[query_frame] == 0 + + # If we're using the first point on the track as a query, don't evaluate the + # other points. + if query_mode == "first": + for i in range(gt_occluded.shape[0]): + index = np.where(gt_occluded[i] == 0)[0][0] + evaluation_points[i, :index] = False + elif query_mode != "strided": + raise ValueError("Unknown query mode " + query_mode) + + # Occlusion accuracy is simply how often the predicted occlusion equals the + # ground truth. + occ_acc = ( + np.sum( + np.equal(pred_occluded, gt_occluded) & evaluation_points, + axis=(1, 2), + ) + / np.sum(evaluation_points) + ) + metrics["occlusion_accuracy"] = occ_acc + + # Next, convert the predictions and ground truth positions into pixel + # coordinates. + visible = np.logical_not(gt_occluded) + pred_visible = np.logical_not(pred_occluded) + all_frac_within = [] + all_jaccard = [] + for thresh in [1, 2, 4, 8, 16]: + # True positives are points that are within the threshold and where both + # the prediction and the ground truth are listed as visible. + within_dist = ( + np.sum( + np.square(pred_tracks - gt_tracks), + axis=-1, + ) + < np.square(thresh) + ) + is_correct = np.logical_and(within_dist, visible) + + # Compute the frac_within_threshold, which is the fraction of points + # within the threshold among points that are visible in the ground truth, + # ignoring whether they're predicted to be visible. 
+ count_correct = np.sum( + is_correct & evaluation_points, + axis=(1, 2), + ) + count_visible_points = np.sum(visible & evaluation_points, axis=(1, 2)) + frac_correct = count_correct / count_visible_points + metrics["pts_within_" + str(thresh)] = frac_correct + all_frac_within.append(frac_correct) + + true_positives = np.sum( + is_correct & pred_visible & evaluation_points, axis=(1, 2) + ) + + # The denominator of the jaccard metric is the true positives plus + # false positives plus false negatives. However, note that true positives + # plus false negatives is simply the number of points in the ground truth + # which is easier to compute than trying to compute all three quantities. + # Thus we just add the number of points in the ground truth to the number + # of false positives. + # + # False positives are simply points that are predicted to be visible, + # but the ground truth is not visible or too far from the prediction. + gt_positives = np.sum(visible & evaluation_points, axis=(1, 2)) + false_positives = (~visible) & pred_visible + false_positives = false_positives | ((~within_dist) & pred_visible) + false_positives = np.sum(false_positives & evaluation_points, axis=(1, 2)) + jaccard = true_positives / (gt_positives + false_positives) + metrics["jaccard_" + str(thresh)] = jaccard + all_jaccard.append(jaccard) + metrics["average_jaccard"] = np.mean( + np.stack(all_jaccard, axis=1), + axis=1, + ) + metrics["average_pts_within_thresh"] = np.mean( + np.stack(all_frac_within, axis=1), + axis=1, + ) + return metrics diff --git a/cotracker/evaluation/core/evaluator.py b/cotracker/evaluation/core/evaluator.py new file mode 100644 index 0000000..9f4053b --- /dev/null +++ b/cotracker/evaluation/core/evaluator.py @@ -0,0 +1,252 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from collections import defaultdict +import os +from typing import Optional +import torch +from tqdm import tqdm +import numpy as np + +from torch.utils.tensorboard import SummaryWriter +from cotracker.datasets.utils import dataclass_to_cuda_ +from cotracker.utils.visualizer import Visualizer +from cotracker.models.core.model_utils import reduce_masked_mean +from cotracker.evaluation.core.eval_utils import compute_tapvid_metrics + +import logging + + +class Evaluator: + """ + A class defining the CoTracker evaluator. 
+ """ + + def __init__(self, exp_dir) -> None: + # Visualization + self.exp_dir = exp_dir + os.makedirs(exp_dir, exist_ok=True) + self.visualization_filepaths = defaultdict(lambda: defaultdict(list)) + self.visualize_dir = os.path.join(exp_dir, "visualisations") + + def compute_metrics(self, metrics, sample, pred_trajectory, dataset_name): + if isinstance(pred_trajectory, tuple): + pred_trajectory, pred_visibility = pred_trajectory + else: + pred_visibility = None + if dataset_name == "badja": + sample.segmentation = (sample.segmentation > 0).float() + *_, N, _ = sample.trajectory.shape + accs = [] + accs_3px = [] + for s1 in range(1, sample.video.shape[1]): # target frame + for n in range(N): + vis = sample.visibility[0, s1, n] + if vis > 0: + coord_e = pred_trajectory[0, s1, n] # 2 + coord_g = sample.trajectory[0, s1, n] # 2 + dist = torch.sqrt(torch.sum((coord_e - coord_g) ** 2, dim=0)) + area = torch.sum(sample.segmentation[0, s1]) + # print_('0.2*sqrt(area)', 0.2*torch.sqrt(area)) + thr = 0.2 * torch.sqrt(area) + # correct = + accs.append((dist < thr).float()) + # print('thr',thr) + accs_3px.append((dist < 3.0).float()) + + res = torch.mean(torch.stack(accs)) * 100.0 + res_3px = torch.mean(torch.stack(accs_3px)) * 100.0 + metrics[sample.seq_name[0]] = res.item() + metrics[sample.seq_name[0] + "_accuracy"] = res_3px.item() + print(metrics) + print( + "avg", np.mean([v for k, v in metrics.items() if "accuracy" not in k]) + ) + print( + "avg acc 3px", + np.mean([v for k, v in metrics.items() if "accuracy" in k]), + ) + elif dataset_name == "fastcapture" or ("kubric" in dataset_name): + *_, N, _ = sample.trajectory.shape + accs = [] + for s1 in range(1, sample.video.shape[1]): # target frame + for n in range(N): + vis = sample.visibility[0, s1, n] + if vis > 0: + coord_e = pred_trajectory[0, s1, n] # 2 + coord_g = sample.trajectory[0, s1, n] # 2 + dist = torch.sqrt(torch.sum((coord_e - coord_g) ** 2, dim=0)) + thr = 3 + correct = (dist < thr).float() + accs.append(correct) + + res = torch.mean(torch.stack(accs)) * 100.0 + metrics[sample.seq_name[0] + "_accuracy"] = res.item() + print(metrics) + print("avg", np.mean([v for v in metrics.values()])) + elif "tapvid" in dataset_name: + B, T, N, D = sample.trajectory.shape + traj = sample.trajectory.clone() + thr = 0.9 + + if pred_visibility is None: + logging.warning("visibility is NONE") + pred_visibility = torch.zeros_like(sample.visibility) + + if not pred_visibility.dtype == torch.bool: + pred_visibility = pred_visibility > thr + + # pred_trajectory + query_points = sample.query_points.clone().cpu().numpy() + + pred_visibility = pred_visibility[:, :, :N] + pred_trajectory = pred_trajectory[:, :, :N] + + gt_tracks = traj.permute(0, 2, 1, 3).cpu().numpy() + gt_occluded = ( + torch.logical_not(sample.visibility.clone().permute(0, 2, 1)) + .cpu() + .numpy() + ) + + pred_occluded = ( + torch.logical_not(pred_visibility.clone().permute(0, 2, 1)) + .cpu() + .numpy() + ) + pred_tracks = pred_trajectory.permute(0, 2, 1, 3).cpu().numpy() + + out_metrics = compute_tapvid_metrics( + query_points, + gt_occluded, + gt_tracks, + pred_occluded, + pred_tracks, + query_mode="strided" if "strided" in dataset_name else "first", + ) + + metrics[sample.seq_name[0]] = out_metrics + for metric_name in out_metrics.keys(): + if "avg" not in metrics: + metrics["avg"] = {} + metrics["avg"][metric_name] = np.mean( + [v[metric_name] for k, v in metrics.items() if k != "avg"] + ) + + logging.info(f"Metrics: {out_metrics}") + logging.info(f"avg: {metrics['avg']}") + 
print("metrics", out_metrics) + print("avg", metrics["avg"]) + else: + rgbs = sample.video + trajs_g = sample.trajectory + valids = sample.valid + vis_g = sample.visibility + + B, S, C, H, W = rgbs.shape + assert C == 3 + B, S, N, D = trajs_g.shape + + assert torch.sum(valids) == B * S * N + + vis_g = (torch.sum(vis_g, dim=1, keepdim=True) >= 4).float().repeat(1, S, 1) + + ate = torch.norm(pred_trajectory - trajs_g, dim=-1) # B, S, N + + metrics["things_all"] = reduce_masked_mean(ate, valids).item() + metrics["things_vis"] = reduce_masked_mean(ate, valids * vis_g).item() + metrics["things_occ"] = reduce_masked_mean( + ate, valids * (1.0 - vis_g) + ).item() + + @torch.no_grad() + def evaluate_sequence( + self, + model, + test_dataloader: torch.utils.data.DataLoader, + dataset_name: str, + train_mode=False, + writer: Optional[SummaryWriter] = None, + step: Optional[int] = 0, + ): + metrics = {} + + vis = Visualizer( + save_dir=self.exp_dir, + fps=7, + ) + + for ind, sample in enumerate(tqdm(test_dataloader)): + if isinstance(sample, tuple): + sample, gotit = sample + if not all(gotit): + print("batch is None") + continue + dataclass_to_cuda_(sample) + + if ( + not train_mode + and hasattr(model, "sequence_len") + and (sample.visibility[:, : model.sequence_len].sum() == 0) + ): + print(f"skipping batch {ind}") + continue + + if "tapvid" in dataset_name: + queries = sample.query_points.clone().float() + + queries = torch.stack( + [ + queries[:, :, 0], + queries[:, :, 2], + queries[:, :, 1], + ], + dim=2, + ) + else: + queries = torch.cat( + [ + torch.zeros_like(sample.trajectory[:, 0, :, :1]), + sample.trajectory[:, 0], + ], + dim=2, + ) + + pred_tracks = model(sample.video, queries) + if "strided" in dataset_name: + + inv_video = sample.video.flip(1).clone() + inv_queries = queries.clone() + inv_queries[:, :, 0] = inv_video.shape[1] - inv_queries[:, :, 0] - 1 + + pred_trj, pred_vsb = pred_tracks + inv_pred_trj, inv_pred_vsb = model(inv_video, inv_queries) + + inv_pred_trj = inv_pred_trj.flip(1) + inv_pred_vsb = inv_pred_vsb.flip(1) + + mask = pred_trj == 0 + + pred_trj[mask] = inv_pred_trj[mask] + pred_vsb[mask[:, :, :, 0]] = inv_pred_vsb[mask[:, :, :, 0]] + + pred_tracks = pred_trj, pred_vsb + + if dataset_name == "badja" or dataset_name == "fastcapture": + seq_name = sample.seq_name[0] + else: + seq_name = str(ind) + + vis.visualize( + sample.video, + pred_tracks[0] if isinstance(pred_tracks, tuple) else pred_tracks, + filename=dataset_name + "_" + seq_name, + writer=writer, + step=step, + ) + + self.compute_metrics(metrics, sample, pred_tracks, dataset_name) + return metrics diff --git a/cotracker/evaluation/evaluate.py b/cotracker/evaluation/evaluate.py new file mode 100644 index 0000000..1995629 --- /dev/null +++ b/cotracker/evaluation/evaluate.py @@ -0,0 +1,179 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import json +import os +from dataclasses import dataclass, field + +import hydra +import numpy as np + +import torch +from omegaconf import OmegaConf + +from cotracker.datasets.badja_dataset import BadjaDataset +from cotracker.datasets.fast_capture_dataset import FastCaptureDataset +from cotracker.datasets.tap_vid_datasets import TapVidDataset +from cotracker.datasets.utils import collate_fn + +from cotracker.models.evaluation_predictor import EvaluationPredictor + +from cotracker.evaluation.core.evaluator import Evaluator +from cotracker.models.build_cotracker import ( + build_cotracker, +) + + +@dataclass(eq=False) +class DefaultConfig: + # Directory where all outputs of the experiment will be saved. + exp_dir: str = "./outputs" + + # Name of the dataset to be used for the evaluation. + dataset_name: str = "badja" + # The root directory of the dataset. + dataset_root: str = "./" + + # Path to the pre-trained model checkpoint to be used for the evaluation. + # The default value is the path to a specific CoTracker model checkpoint. + # Other available options are commented. + checkpoint: str = "./checkpoints/cotracker_stride_4_wind_8.pth" + # cotracker_stride_4_wind_12 + # cotracker_stride_8_wind_16 + + # EvaluationPredictor parameters + # The size (N) of the support grid used in the predictor. + # The total number of points is (N*N). + grid_size: int = 6 + # The size (N) of the local support grid. + local_grid_size: int = 6 + # A flag indicating whether to evaluate one ground truth point at a time. + single_point: bool = True + # The number of iterative updates for each sliding window. + n_iters: int = 6 + + seed: int = 0 + gpu_idx: int = 0 + + # Override hydra's working directory to current working dir, + # also disable storing the .hydra logs: + hydra: dict = field( + default_factory=lambda: { + "run": {"dir": "."}, + "output_subdir": None, + } + ) + + +def run_eval(cfg: DefaultConfig): + """ + The function evaluates CoTracker on a specified benchmark dataset based on a provided configuration. + + Args: + cfg (DefaultConfig): An instance of DefaultConfig class which includes: + - exp_dir (str): The directory path for the experiment. + - dataset_name (str): The name of the dataset to be used. + - dataset_root (str): The root directory of the dataset. + - checkpoint (str): The path to the CoTracker model's checkpoint. + - single_point (bool): A flag indicating whether to evaluate one ground truth point at a time. + - n_iters (int): The number of iterative updates for each sliding window. + - seed (int): The seed for setting the random state for reproducibility. + - gpu_idx (int): The index of the GPU to be used. 
+ """ + # Creating the experiment directory if it doesn't exist + os.makedirs(cfg.exp_dir, exist_ok=True) + + # Saving the experiment configuration to a .yaml file in the experiment directory + cfg_file = os.path.join(cfg.exp_dir, "expconfig.yaml") + with open(cfg_file, "w") as f: + OmegaConf.save(config=cfg, f=f) + + evaluator = Evaluator(cfg.exp_dir) + cotracker_model = build_cotracker(cfg.checkpoint) + + # Creating the EvaluationPredictor object + predictor = EvaluationPredictor( + cotracker_model, + grid_size=cfg.grid_size, + local_grid_size=cfg.local_grid_size, + single_point=cfg.single_point, + n_iters=cfg.n_iters, + ) + + # Setting the random seeds + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + # Constructing the specified dataset + curr_collate_fn = collate_fn + if cfg.dataset_name == "badja": + test_dataset = BadjaDataset(data_root=os.path.join(cfg.dataset_root, "BADJA")) + elif cfg.dataset_name == "fastcapture": + test_dataset = FastCaptureDataset( + data_root=os.path.join(cfg.dataset_root, "fastcapture"), + max_seq_len=100, + max_num_points=20, + ) + elif "tapvid" in cfg.dataset_name: + dataset_type = cfg.dataset_name.split("_")[1] + if dataset_type == "davis": + data_root = os.path.join(cfg.dataset_root, "/tapvid_davis/tapvid_davis.pkl") + elif dataset_type == "kinetics": + data_root = os.path.join( + cfg.dataset_root, "/kinetics/kinetics-dataset/k700-2020/tapvid_kinetics" + ) + test_dataset = TapVidDataset( + dataset_type=dataset_type, + data_root=data_root, + queried_first=not "strided" in cfg.dataset_name, + ) + + # Creating the DataLoader object + test_dataloader = torch.utils.data.DataLoader( + test_dataset, + batch_size=1, + shuffle=False, + num_workers=14, + collate_fn=curr_collate_fn, + ) + + # Timing and conducting the evaluation + import time + + start = time.time() + evaluate_result = evaluator.evaluate_sequence( + predictor, + test_dataloader, + dataset_name=cfg.dataset_name, + ) + end = time.time() + print(end - start) + + # Saving the evaluation results to a .json file + if not "tapvid" in cfg.dataset_name: + print("evaluate_result", evaluate_result) + else: + evaluate_result = evaluate_result["avg"] + result_file = os.path.join(cfg.exp_dir, f"result_eval_.json") + evaluate_result["time"] = end - start + print(f"Dumping eval results to {result_file}.") + with open(result_file, "w") as f: + json.dump(evaluate_result, f) + + +cs = hydra.core.config_store.ConfigStore.instance() +cs.store(name="default_config_eval", node=DefaultConfig) + + +@hydra.main(config_path="./configs/", config_name="default_config_eval") +def evaluate(cfg: DefaultConfig) -> None: + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" + os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.gpu_idx) + run_eval(cfg) + + +if __name__ == "__main__": + evaluate() diff --git a/cotracker/models/__init__.py b/cotracker/models/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/cotracker/models/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/cotracker/models/build_cotracker.py b/cotracker/models/build_cotracker.py new file mode 100644 index 0000000..40b19b5 --- /dev/null +++ b/cotracker/models/build_cotracker.py @@ -0,0 +1,70 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch + +from cotracker.models.core.cotracker.cotracker import CoTracker + + +def build_cotracker( + checkpoint: str, +): + model_name = checkpoint.split("/")[-1].split(".")[0] + if model_name == "cotracker_stride_4_wind_8": + return build_cotracker_stride_4_wind_8(checkpoint=checkpoint) + elif model_name == "cotracker_stride_4_wind_12": + return build_cotracker_stride_4_wind_12(checkpoint=checkpoint) + elif model_name == "cotracker_stride_8_wind_16": + return build_cotracker_stride_8_wind_16(checkpoint=checkpoint) + else: + raise ValueError(f"Unknown model name {model_name}") + + +# model used to produce the results in the paper +def build_cotracker_stride_4_wind_8(checkpoint=None): + return _build_cotracker( + stride=4, + sequence_len=8, + checkpoint=checkpoint, + ) + + +def build_cotracker_stride_4_wind_12(checkpoint=None): + return _build_cotracker( + stride=4, + sequence_len=12, + checkpoint=checkpoint, + ) + + +# the fastest model +def build_cotracker_stride_8_wind_16(checkpoint=None): + return _build_cotracker( + stride=8, + sequence_len=16, + checkpoint=checkpoint, + ) + + +def _build_cotracker( + stride, + sequence_len, + checkpoint=None, +): + cotracker = CoTracker( + stride=stride, + S=sequence_len, + add_space_attn=True, + space_depth=6, + time_depth=6, + ) + if checkpoint is not None: + with open(checkpoint, "rb") as f: + state_dict = torch.load(f, map_location="cpu") + if "model" in state_dict: + state_dict = state_dict["model"] + cotracker.load_state_dict(state_dict) + return cotracker diff --git a/cotracker/models/core/__init__.py b/cotracker/models/core/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/cotracker/models/core/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/cotracker/models/core/cotracker/__init__.py b/cotracker/models/core/cotracker/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/cotracker/models/core/cotracker/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/cotracker/models/core/cotracker/blocks.py b/cotracker/models/core/cotracker/blocks.py new file mode 100644 index 0000000..d199715 --- /dev/null +++ b/cotracker/models/core/cotracker/blocks.py @@ -0,0 +1,400 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
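A hedged usage sketch for the builders above: the checkpoint path is the default from the evaluation config and must exist on disk, and the [t, x, y] query format follows the evaluator code earlier in this commit. The CoTracker forward defined later in cotracker.py returns (tracks, feat_init, visibility, train_data).

    import torch
    from cotracker.models.build_cotracker import build_cotracker

    # The builder picks the architecture (stride, window length) from the file name.
    model = build_cotracker("./checkpoints/cotracker_stride_4_wind_8.pth")
    model.eval()

    video = torch.zeros(1, 8, 3, 384, 512)             # B, T, C, H, W, values in [0, 255]
    queries = torch.tensor([[[0.0, 100.0, 100.0],       # [t, x, y] per query point
                             [2.0,  50.0,  60.0]]])
    with torch.no_grad():
        tracks, _, visibility, _ = model(video, queries)
    # tracks: 1, 8, 2, 2 pixel coordinates per frame; visibility: 1, 8, 2 in [0, 1]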
+ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from einops import rearrange +from timm.models.vision_transformer import Attention, Mlp + + +class ResidualBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn="group", stride=1): + super(ResidualBlock, self).__init__() + + self.conv1 = nn.Conv2d( + in_planes, + planes, + kernel_size=3, + padding=1, + stride=stride, + padding_mode="zeros", + ) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, padding=1, padding_mode="zeros" + ) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == "group": + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == "batch": + self.norm1 = nn.BatchNorm2d(planes) + self.norm2 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm3 = nn.BatchNorm2d(planes) + + elif norm_fn == "instance": + self.norm1 = nn.InstanceNorm2d(planes) + self.norm2 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm3 = nn.InstanceNorm2d(planes) + + elif norm_fn == "none": + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + if not stride == 1: + self.norm3 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3 + ) + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x + y) + + +class BasicEncoder(nn.Module): + def __init__( + self, input_dim=3, output_dim=128, stride=8, norm_fn="batch", dropout=0.0 + ): + super(BasicEncoder, self).__init__() + self.stride = stride + self.norm_fn = norm_fn + self.in_planes = 64 + + if self.norm_fn == "group": + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=self.in_planes) + self.norm2 = nn.GroupNorm(num_groups=8, num_channels=output_dim * 2) + + elif self.norm_fn == "batch": + self.norm1 = nn.BatchNorm2d(self.in_planes) + self.norm2 = nn.BatchNorm2d(output_dim * 2) + + elif self.norm_fn == "instance": + self.norm1 = nn.InstanceNorm2d(self.in_planes) + self.norm2 = nn.InstanceNorm2d(output_dim * 2) + + elif self.norm_fn == "none": + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d( + input_dim, + self.in_planes, + kernel_size=7, + stride=2, + padding=3, + padding_mode="zeros", + ) + self.relu1 = nn.ReLU(inplace=True) + + self.shallow = False + if self.shallow: + self.layer1 = self._make_layer(64, stride=1) + self.layer2 = self._make_layer(96, stride=2) + self.layer3 = self._make_layer(128, stride=2) + self.conv2 = nn.Conv2d(128 + 96 + 64, output_dim, kernel_size=1) + else: + self.layer1 = self._make_layer(64, stride=1) + self.layer2 = self._make_layer(96, stride=2) + self.layer3 = self._make_layer(128, stride=2) + self.layer4 = self._make_layer(128, stride=2) + + self.conv2 = nn.Conv2d( + 128 + 128 + 96 + 64, + output_dim * 2, + kernel_size=3, + padding=1, + padding_mode="zeros", + ) + self.relu2 = nn.ReLU(inplace=True) + self.conv3 = nn.Conv2d(output_dim * 2, output_dim, kernel_size=1) + + self.dropout = None + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + elif isinstance(m, 
(nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + def forward(self, x): + _, _, H, W = x.shape + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + if self.shallow: + a = self.layer1(x) + b = self.layer2(a) + c = self.layer3(b) + a = F.interpolate( + a, + (H // self.stride, W // self.stride), + mode="bilinear", + align_corners=True, + ) + b = F.interpolate( + b, + (H // self.stride, W // self.stride), + mode="bilinear", + align_corners=True, + ) + c = F.interpolate( + c, + (H // self.stride, W // self.stride), + mode="bilinear", + align_corners=True, + ) + x = self.conv2(torch.cat([a, b, c], dim=1)) + else: + a = self.layer1(x) + b = self.layer2(a) + c = self.layer3(b) + d = self.layer4(c) + a = F.interpolate( + a, + (H // self.stride, W // self.stride), + mode="bilinear", + align_corners=True, + ) + b = F.interpolate( + b, + (H // self.stride, W // self.stride), + mode="bilinear", + align_corners=True, + ) + c = F.interpolate( + c, + (H // self.stride, W // self.stride), + mode="bilinear", + align_corners=True, + ) + d = F.interpolate( + d, + (H // self.stride, W // self.stride), + mode="bilinear", + align_corners=True, + ) + x = self.conv2(torch.cat([a, b, c, d], dim=1)) + x = self.norm2(x) + x = self.relu2(x) + x = self.conv3(x) + + if self.training and self.dropout is not None: + x = self.dropout(x) + return x + + +class AttnBlock(nn.Module): + """ + A DiT block with adaptive layer norm zero (adaLN-Zero) conditioning. 
+ """ + + def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, **block_kwargs): + super().__init__() + self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.attn = Attention( + hidden_size, num_heads=num_heads, qkv_bias=True, **block_kwargs + ) + + self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + mlp_hidden_dim = int(hidden_size * mlp_ratio) + approx_gelu = lambda: nn.GELU(approximate="tanh") + self.mlp = Mlp( + in_features=hidden_size, + hidden_features=mlp_hidden_dim, + act_layer=approx_gelu, + drop=0, + ) + + def forward(self, x): + x = x + self.attn(self.norm1(x)) + x = x + self.mlp(self.norm2(x)) + return x + + +def bilinear_sampler(img, coords, mode="bilinear", mask=False): + """Wrapper for grid_sample, uses pixel coordinates""" + H, W = img.shape[-2:] + xgrid, ygrid = coords.split([1, 1], dim=-1) + # go to 0,1 then 0,2 then -1,1 + xgrid = 2 * xgrid / (W - 1) - 1 + ygrid = 2 * ygrid / (H - 1) - 1 + + grid = torch.cat([xgrid, ygrid], dim=-1) + img = F.grid_sample(img, grid, align_corners=True) + + if mask: + mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1) + return img, mask.float() + + return img + + +class CorrBlock: + def __init__(self, fmaps, num_levels=4, radius=4): + B, S, C, H, W = fmaps.shape + self.S, self.C, self.H, self.W = S, C, H, W + + self.num_levels = num_levels + self.radius = radius + self.fmaps_pyramid = [] + + self.fmaps_pyramid.append(fmaps) + for i in range(self.num_levels - 1): + fmaps_ = fmaps.reshape(B * S, C, H, W) + fmaps_ = F.avg_pool2d(fmaps_, 2, stride=2) + _, _, H, W = fmaps_.shape + fmaps = fmaps_.reshape(B, S, C, H, W) + self.fmaps_pyramid.append(fmaps) + + def sample(self, coords): + r = self.radius + B, S, N, D = coords.shape + assert D == 2 + + H, W = self.H, self.W + out_pyramid = [] + for i in range(self.num_levels): + corrs = self.corrs_pyramid[i] # B, S, N, H, W + _, _, _, H, W = corrs.shape + + dx = torch.linspace(-r, r, 2 * r + 1) + dy = torch.linspace(-r, r, 2 * r + 1) + delta = torch.stack(torch.meshgrid(dy, dx, indexing="ij"), axis=-1).to( + coords.device + ) + + centroid_lvl = coords.reshape(B * S * N, 1, 1, 2) / 2 ** i + delta_lvl = delta.view(1, 2 * r + 1, 2 * r + 1, 2) + coords_lvl = centroid_lvl + delta_lvl + + corrs = bilinear_sampler(corrs.reshape(B * S * N, 1, H, W), coords_lvl) + corrs = corrs.view(B, S, N, -1) + out_pyramid.append(corrs) + + out = torch.cat(out_pyramid, dim=-1) # B, S, N, LRR*2 + return out.contiguous().float() + + def corr(self, targets): + B, S, N, C = targets.shape + assert C == self.C + assert S == self.S + + fmap1 = targets + + self.corrs_pyramid = [] + for fmaps in self.fmaps_pyramid: + _, _, _, H, W = fmaps.shape + fmap2s = fmaps.view(B, S, C, H * W) + corrs = torch.matmul(fmap1, fmap2s) + corrs = corrs.view(B, S, N, H, W) + corrs = corrs / torch.sqrt(torch.tensor(C).float()) + self.corrs_pyramid.append(corrs) + + +class UpdateFormer(nn.Module): + """ + Transformer model that updates track estimates. 
+ """ + + def __init__( + self, + space_depth=12, + time_depth=12, + input_dim=320, + hidden_size=384, + num_heads=8, + output_dim=130, + mlp_ratio=4.0, + add_space_attn=True, + ): + super().__init__() + self.out_channels = 2 + self.num_heads = num_heads + self.hidden_size = hidden_size + self.add_space_attn = add_space_attn + self.input_transform = torch.nn.Linear(input_dim, hidden_size, bias=True) + self.flow_head = torch.nn.Linear(hidden_size, output_dim, bias=True) + + self.time_blocks = nn.ModuleList( + [ + AttnBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio) + for _ in range(time_depth) + ] + ) + + if add_space_attn: + self.space_blocks = nn.ModuleList( + [ + AttnBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio) + for _ in range(space_depth) + ] + ) + assert len(self.time_blocks) >= len(self.space_blocks) + self.initialize_weights() + + def initialize_weights(self): + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + + self.apply(_basic_init) + + def forward(self, input_tensor): + x = self.input_transform(input_tensor) + + j = 0 + for i in range(len(self.time_blocks)): + B, N, T, _ = x.shape + x_time = rearrange(x, "b n t c -> (b n) t c", b=B, t=T, n=N) + x_time = self.time_blocks[i](x_time) + + x = rearrange(x_time, "(b n) t c -> b n t c ", b=B, t=T, n=N) + if self.add_space_attn and ( + i % (len(self.time_blocks) // len(self.space_blocks)) == 0 + ): + x_space = rearrange(x, "b n t c -> (b t) n c ", b=B, t=T, n=N) + x_space = self.space_blocks[j](x_space) + x = rearrange(x_space, "(b t) n c -> b n t c ", b=B, t=T, n=N) + j += 1 + + flow = self.flow_head(x) + return flow diff --git a/cotracker/models/core/cotracker/cotracker.py b/cotracker/models/core/cotracker/cotracker.py new file mode 100644 index 0000000..c9eca1f --- /dev/null +++ b/cotracker/models/core/cotracker/cotracker.py @@ -0,0 +1,351 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import torch +import torch.nn as nn +from einops import rearrange + +from cotracker.models.core.cotracker.blocks import ( + BasicEncoder, + CorrBlock, + UpdateFormer, +) + +from cotracker.models.core.model_utils import meshgrid2d, bilinear_sample2d, smart_cat +from cotracker.models.core.embeddings import ( + get_2d_embedding, + get_1d_sincos_pos_embed_from_grid, + get_2d_sincos_pos_embed, +) + + +torch.manual_seed(0) + + +def get_points_on_a_grid(grid_size, interp_shape, grid_center=(0, 0)): + if grid_size == 1: + return torch.tensor([interp_shape[1] / 2, interp_shape[0] / 2])[ + None, None + ].cuda() + + grid_y, grid_x = meshgrid2d( + 1, grid_size, grid_size, stack=False, norm=False, device="cuda" + ) + step = interp_shape[1] // 64 + if grid_center[0] != 0 or grid_center[1] != 0: + grid_y = grid_y - grid_size / 2.0 + grid_x = grid_x - grid_size / 2.0 + grid_y = step + grid_y.reshape(1, -1) / float(grid_size - 1) * ( + interp_shape[0] - step * 2 + ) + grid_x = step + grid_x.reshape(1, -1) / float(grid_size - 1) * ( + interp_shape[1] - step * 2 + ) + + grid_y = grid_y + grid_center[0] + grid_x = grid_x + grid_center[1] + xy = torch.stack([grid_x, grid_y], dim=-1).cuda() + return xy + + +def sample_pos_embed(grid_size, embed_dim, coords): + pos_embed = get_2d_sincos_pos_embed(embed_dim=embed_dim, grid_size=grid_size) + pos_embed = ( + torch.from_numpy(pos_embed) + .reshape(grid_size[0], grid_size[1], embed_dim) + .float() + .unsqueeze(0) + .to(coords.device) + ) + sampled_pos_embed = bilinear_sample2d( + pos_embed.permute(0, 3, 1, 2), coords[:, 0, :, 0], coords[:, 0, :, 1] + ) + return sampled_pos_embed + + +class CoTracker(nn.Module): + def __init__( + self, + S=8, + stride=8, + add_space_attn=True, + num_heads=8, + hidden_size=384, + space_depth=12, + time_depth=12, + ): + super(CoTracker, self).__init__() + self.S = S + self.stride = stride + self.hidden_dim = 256 + self.latent_dim = latent_dim = 128 + self.corr_levels = 4 + self.corr_radius = 3 + self.add_space_attn = add_space_attn + self.fnet = BasicEncoder( + output_dim=self.latent_dim, norm_fn="instance", dropout=0, stride=stride + ) + + self.updateformer = UpdateFormer( + space_depth=space_depth, + time_depth=time_depth, + input_dim=456, + hidden_size=hidden_size, + num_heads=num_heads, + output_dim=latent_dim + 2, + mlp_ratio=4.0, + add_space_attn=add_space_attn, + ) + + self.norm = nn.GroupNorm(1, self.latent_dim) + self.ffeat_updater = nn.Sequential( + nn.Linear(self.latent_dim, self.latent_dim), + nn.GELU(), + ) + self.vis_predictor = nn.Sequential( + nn.Linear(self.latent_dim, 1), + ) + + def forward_iteration( + self, + fmaps, + coords_init, + feat_init=None, + vis_init=None, + track_mask=None, + iters=4, + ): + B, S_init, N, D = coords_init.shape + assert D == 2 + assert B == 1 + + B, S, __, H8, W8 = fmaps.shape + + device = fmaps.device + + if S_init < S: + coords = torch.cat( + [coords_init, coords_init[:, -1].repeat(1, S - S_init, 1, 1)], dim=1 + ) + vis_init = torch.cat( + [vis_init, vis_init[:, -1].repeat(1, S - S_init, 1, 1)], dim=1 + ) + else: + coords = coords_init.clone() + + fcorr_fn = CorrBlock( + fmaps, num_levels=self.corr_levels, radius=self.corr_radius + ) + + ffeats = feat_init.clone() + + times_ = torch.linspace(0, S - 1, S).reshape(1, S, 1) + + pos_embed = sample_pos_embed( + grid_size=(H8, W8), + embed_dim=456, + coords=coords, + ) + pos_embed = rearrange(pos_embed, "b e n -> (b n) e").unsqueeze(1) + times_embed = ( + torch.from_numpy(get_1d_sincos_pos_embed_from_grid(456, times_[0]))[None] + .repeat(B, 
1, 1) + .float() + .to(device) + ) + coord_predictions = [] + + for __ in range(iters): + coords = coords.detach() + fcorr_fn.corr(ffeats) + + fcorrs = fcorr_fn.sample(coords) # B, S, N, LRR + LRR = fcorrs.shape[3] + + fcorrs_ = fcorrs.permute(0, 2, 1, 3).reshape(B * N, S, LRR) + flows_ = (coords - coords[:, 0:1]).permute(0, 2, 1, 3).reshape(B * N, S, 2) + + flows_cat = get_2d_embedding(flows_, 64, cat_coords=True) + ffeats_ = ffeats.permute(0, 2, 1, 3).reshape(B * N, S, self.latent_dim) + + if track_mask.shape[1] < vis_init.shape[1]: + track_mask = torch.cat( + [ + track_mask, + torch.zeros_like(track_mask[:, 0]).repeat( + 1, vis_init.shape[1] - track_mask.shape[1], 1, 1 + ), + ], + dim=1, + ) + concat = ( + torch.cat([track_mask, vis_init], dim=2) + .permute(0, 2, 1, 3) + .reshape(B * N, S, 2) + ) + + transformer_input = torch.cat([flows_cat, fcorrs_, ffeats_, concat], dim=2) + x = transformer_input + pos_embed + times_embed + + x = rearrange(x, "(b n) t d -> b n t d", b=B) + + delta = self.updateformer(x) + + delta = rearrange(delta, " b n t d -> (b n) t d") + + delta_coords_ = delta[:, :, :2] + delta_feats_ = delta[:, :, 2:] + + delta_feats_ = delta_feats_.reshape(B * N * S, self.latent_dim) + ffeats_ = ffeats.permute(0, 2, 1, 3).reshape(B * N * S, self.latent_dim) + + ffeats_ = self.ffeat_updater(self.norm(delta_feats_)) + ffeats_ + + ffeats = ffeats_.reshape(B, N, S, self.latent_dim).permute( + 0, 2, 1, 3 + ) # B,S,N,C + + coords = coords + delta_coords_.reshape(B, N, S, 2).permute(0, 2, 1, 3) + coord_predictions.append(coords * self.stride) + + vis_e = self.vis_predictor(ffeats.reshape(B * S * N, self.latent_dim)).reshape( + B, S, N + ) + return coord_predictions, vis_e, feat_init + + def forward(self, rgbs, queries, iters=4, feat_init=None, is_train=False): + B, T, C, H, W = rgbs.shape + B, N, __ = queries.shape + + device = rgbs.device + assert B == 1 + # INIT for the first sequence + # We want to sort points by the first frame they are visible to add them to the tensor of tracked points consequtively + first_positive_inds = queries[:, :, 0].long() + + __, sort_inds = torch.sort(first_positive_inds[0], dim=0, descending=False) + inv_sort_inds = torch.argsort(sort_inds, dim=0) + first_positive_sorted_inds = first_positive_inds[0][sort_inds] + + assert torch.allclose( + first_positive_inds[0], first_positive_inds[0][sort_inds][inv_sort_inds] + ) + + coords_init = queries[:, :, 1:].reshape(B, 1, N, 2).repeat( + 1, self.S, 1, 1 + ) / float(self.stride) + + rgbs = 2 * (rgbs / 255.0) - 1.0 + + traj_e = torch.zeros((B, T, N, 2), device=device) + vis_e = torch.zeros((B, T, N), device=device) + + ind_array = torch.arange(T, device=device) + ind_array = ind_array[None, :, None].repeat(B, 1, N) + + track_mask = (ind_array >= first_positive_inds[:, None, :]).unsqueeze(-1) + # these are logits, so we initialize visibility with something that would give a value close to 1 after softmax + vis_init = torch.ones((B, self.S, N, 1), device=device).float() * 10 + + ind = 0 + + track_mask_ = track_mask[:, :, sort_inds].clone() + coords_init_ = coords_init[:, :, sort_inds].clone() + vis_init_ = vis_init[:, :, sort_inds].clone() + + prev_wind_idx = 0 + fmaps_ = None + vis_predictions = [] + coord_predictions = [] + wind_inds = [] + while ind < T - self.S // 2: + rgbs_seq = rgbs[:, ind : ind + self.S] + + S = S_local = rgbs_seq.shape[1] + if S < self.S: + rgbs_seq = torch.cat( + [rgbs_seq, rgbs_seq[:, -1, None].repeat(1, self.S - S, 1, 1, 1)], + dim=1, + ) + S = rgbs_seq.shape[1] + rgbs_ = 
rgbs_seq.reshape(B * S, C, H, W) + + if fmaps_ is None: + fmaps_ = self.fnet(rgbs_) + else: + fmaps_ = torch.cat( + [fmaps_[self.S // 2 :], self.fnet(rgbs_[self.S // 2 :])], dim=0 + ) + fmaps = fmaps_.reshape( + B, S, self.latent_dim, H // self.stride, W // self.stride + ) + + curr_wind_points = torch.nonzero(first_positive_sorted_inds < ind + self.S) + if curr_wind_points.shape[0] == 0: + ind = ind + self.S // 2 + continue + wind_idx = curr_wind_points[-1] + 1 + + if wind_idx - prev_wind_idx > 0: + fmaps_sample = fmaps[ + :, first_positive_sorted_inds[prev_wind_idx:wind_idx] - ind + ] + + feat_init_ = bilinear_sample2d( + fmaps_sample, + coords_init_[:, 0, prev_wind_idx:wind_idx, 0], + coords_init_[:, 0, prev_wind_idx:wind_idx, 1], + ).permute(0, 2, 1) + + feat_init_ = feat_init_.unsqueeze(1).repeat(1, self.S, 1, 1) + feat_init = smart_cat(feat_init, feat_init_, dim=2) + + if prev_wind_idx > 0: + new_coords = coords[-1][:, self.S // 2 :] / float(self.stride) + + coords_init_[:, : self.S // 2, :prev_wind_idx] = new_coords + coords_init_[:, self.S // 2 :, :prev_wind_idx] = new_coords[ + :, -1 + ].repeat(1, self.S // 2, 1, 1) + + new_vis = vis[:, self.S // 2 :].unsqueeze(-1) + vis_init_[:, : self.S // 2, :prev_wind_idx] = new_vis + vis_init_[:, self.S // 2 :, :prev_wind_idx] = new_vis[:, -1].repeat( + 1, self.S // 2, 1, 1 + ) + + coords, vis, __ = self.forward_iteration( + fmaps=fmaps, + coords_init=coords_init_[:, :, :wind_idx], + feat_init=feat_init[:, :, :wind_idx], + vis_init=vis_init_[:, :, :wind_idx], + track_mask=track_mask_[:, ind : ind + self.S, :wind_idx], + iters=iters, + ) + if is_train: + vis_predictions.append(torch.sigmoid(vis[:, :S_local])) + coord_predictions.append([coord[:, :S_local] for coord in coords]) + wind_inds.append(wind_idx) + + traj_e[:, ind : ind + self.S, :wind_idx] = coords[-1][:, :S_local] + vis_e[:, ind : ind + self.S, :wind_idx] = vis[:, :S_local] + + track_mask_[:, : ind + self.S, :wind_idx] = 0.0 + ind = ind + self.S // 2 + + prev_wind_idx = wind_idx + + traj_e = traj_e[:, :, inv_sort_inds] + vis_e = vis_e[:, :, inv_sort_inds] + + vis_e = torch.sigmoid(vis_e) + + train_data = ( + (vis_predictions, coord_predictions, wind_inds, sort_inds) + if is_train + else None + ) + return traj_e, feat_init, vis_e, train_data diff --git a/cotracker/models/core/cotracker/losses.py b/cotracker/models/core/cotracker/losses.py new file mode 100644 index 0000000..2bdcc2e --- /dev/null +++ b/cotracker/models/core/cotracker/losses.py @@ -0,0 +1,61 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn.functional as F +from cotracker.models.core.model_utils import reduce_masked_mean + +EPS = 1e-6 + + +def balanced_ce_loss(pred, gt, valid=None): + total_balanced_loss = 0.0 + for j in range(len(gt)): + B, S, N = gt[j].shape + # pred and gt are the same shape + for (a, b) in zip(pred[j].size(), gt[j].size()): + assert a == b # some shape mismatch! + # if valid is not None: + for (a, b) in zip(pred[j].size(), valid[j].size()): + assert a == b # some shape mismatch! 
+ + pos = (gt[j] > 0.95).float() + neg = (gt[j] < 0.05).float() + + label = pos * 2.0 - 1.0 + a = -label * pred[j] + b = F.relu(a) + loss = b + torch.log(torch.exp(-b) + torch.exp(a - b)) + + pos_loss = reduce_masked_mean(loss, pos * valid[j]) + neg_loss = reduce_masked_mean(loss, neg * valid[j]) + + balanced_loss = pos_loss + neg_loss + total_balanced_loss += balanced_loss / float(N) + return total_balanced_loss + + +def sequence_loss(flow_preds, flow_gt, vis, valids, gamma=0.8): + """Loss function defined over sequence of flow predictions""" + total_flow_loss = 0.0 + for j in range(len(flow_gt)): + B, S, N, D = flow_gt[j].shape + assert D == 2 + B, S1, N = vis[j].shape + B, S2, N = valids[j].shape + assert S == S1 + assert S == S2 + n_predictions = len(flow_preds[j]) + flow_loss = 0.0 + for i in range(n_predictions): + i_weight = gamma ** (n_predictions - i - 1) + flow_pred = flow_preds[j][i] + i_loss = (flow_pred - flow_gt[j]).abs() # B, S, N, 2 + i_loss = torch.mean(i_loss, dim=3) # B, S, N + flow_loss += i_weight * reduce_masked_mean(i_loss, valids[j]) + flow_loss = flow_loss / n_predictions + total_flow_loss += flow_loss / float(N) + return total_flow_loss diff --git a/cotracker/models/core/embeddings.py b/cotracker/models/core/embeddings.py new file mode 100644 index 0000000..574ed15 --- /dev/null +++ b/cotracker/models/core/embeddings.py @@ -0,0 +1,154 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import numpy as np + + +def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0): + """ + grid_size: int of the grid height and width + return: + pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + if isinstance(grid_size, tuple): + grid_size_h, grid_size_w = grid_size + else: + grid_size_h = grid_size_w = grid_size + grid_h = np.arange(grid_size_h, dtype=np.float32) + grid_w = np.arange(grid_size_w, dtype=np.float32) + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_size_h, grid_size_w]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if cls_token and extra_tokens > 0: + pos_embed = np.concatenate( + [np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0 + ) + return pos_embed + + +def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + assert embed_dim % 2 == 0 + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position + pos: a list of positions to be encoded: size (M,) + out: (M, D) + """ + assert embed_dim % 2 == 0 + omega = np.arange(embed_dim // 2, dtype=np.float64) + omega /= embed_dim / 2.0 + omega = 1.0 / 10000 ** omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb + + +def get_2d_embedding(xy, C, cat_coords=True): + B, N, D = xy.shape + assert D == 2 + + x = xy[:, :, 0:1] + y = xy[:, :, 
1:2] + div_term = ( + torch.arange(0, C, 2, device=xy.device, dtype=torch.float32) * (1000.0 / C) + ).reshape(1, 1, int(C / 2)) + + pe_x = torch.zeros(B, N, C, device=xy.device, dtype=torch.float32) + pe_y = torch.zeros(B, N, C, device=xy.device, dtype=torch.float32) + + pe_x[:, :, 0::2] = torch.sin(x * div_term) + pe_x[:, :, 1::2] = torch.cos(x * div_term) + + pe_y[:, :, 0::2] = torch.sin(y * div_term) + pe_y[:, :, 1::2] = torch.cos(y * div_term) + + pe = torch.cat([pe_x, pe_y], dim=2) # B, N, C*3 + if cat_coords: + pe = torch.cat([xy, pe], dim=2) # B, N, C*3+3 + return pe + + +def get_3d_embedding(xyz, C, cat_coords=True): + B, N, D = xyz.shape + assert D == 3 + + x = xyz[:, :, 0:1] + y = xyz[:, :, 1:2] + z = xyz[:, :, 2:3] + div_term = ( + torch.arange(0, C, 2, device=xyz.device, dtype=torch.float32) * (1000.0 / C) + ).reshape(1, 1, int(C / 2)) + + pe_x = torch.zeros(B, N, C, device=xyz.device, dtype=torch.float32) + pe_y = torch.zeros(B, N, C, device=xyz.device, dtype=torch.float32) + pe_z = torch.zeros(B, N, C, device=xyz.device, dtype=torch.float32) + + pe_x[:, :, 0::2] = torch.sin(x * div_term) + pe_x[:, :, 1::2] = torch.cos(x * div_term) + + pe_y[:, :, 0::2] = torch.sin(y * div_term) + pe_y[:, :, 1::2] = torch.cos(y * div_term) + + pe_z[:, :, 0::2] = torch.sin(z * div_term) + pe_z[:, :, 1::2] = torch.cos(z * div_term) + + pe = torch.cat([pe_x, pe_y, pe_z], dim=2) # B, N, C*3 + if cat_coords: + pe = torch.cat([pe, xyz], dim=2) # B, N, C*3+3 + return pe + + +def get_4d_embedding(xyzw, C, cat_coords=True): + B, N, D = xyzw.shape + assert D == 4 + + x = xyzw[:, :, 0:1] + y = xyzw[:, :, 1:2] + z = xyzw[:, :, 2:3] + w = xyzw[:, :, 3:4] + div_term = ( + torch.arange(0, C, 2, device=xyzw.device, dtype=torch.float32) * (1000.0 / C) + ).reshape(1, 1, int(C / 2)) + + pe_x = torch.zeros(B, N, C, device=xyzw.device, dtype=torch.float32) + pe_y = torch.zeros(B, N, C, device=xyzw.device, dtype=torch.float32) + pe_z = torch.zeros(B, N, C, device=xyzw.device, dtype=torch.float32) + pe_w = torch.zeros(B, N, C, device=xyzw.device, dtype=torch.float32) + + pe_x[:, :, 0::2] = torch.sin(x * div_term) + pe_x[:, :, 1::2] = torch.cos(x * div_term) + + pe_y[:, :, 0::2] = torch.sin(y * div_term) + pe_y[:, :, 1::2] = torch.cos(y * div_term) + + pe_z[:, :, 0::2] = torch.sin(z * div_term) + pe_z[:, :, 1::2] = torch.cos(z * div_term) + + pe_w[:, :, 0::2] = torch.sin(w * div_term) + pe_w[:, :, 1::2] = torch.cos(w * div_term) + + pe = torch.cat([pe_x, pe_y, pe_z, pe_w], dim=2) # B, N, C*3 + if cat_coords: + pe = torch.cat([pe, xyzw], dim=2) # B, N, C*3+3 + return pe diff --git a/cotracker/models/core/model_utils.py b/cotracker/models/core/model_utils.py new file mode 100644 index 0000000..2f38006 --- /dev/null +++ b/cotracker/models/core/model_utils.py @@ -0,0 +1,169 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch + +EPS = 1e-6 + + +def smart_cat(tensor1, tensor2, dim): + if tensor1 is None: + return tensor2 + return torch.cat([tensor1, tensor2], dim=dim) + + +def normalize_single(d): + # d is a whatever shape torch tensor + dmin = torch.min(d) + dmax = torch.max(d) + d = (d - dmin) / (EPS + (dmax - dmin)) + return d + + +def normalize(d): + # d is B x whatever. 
normalize within each element of the batch + out = torch.zeros(d.size()) + if d.is_cuda: + out = out.cuda() + B = list(d.size())[0] + for b in list(range(B)): + out[b] = normalize_single(d[b]) + return out + + +def meshgrid2d(B, Y, X, stack=False, norm=False, device="cuda"): + # returns a meshgrid sized B x Y x X + + grid_y = torch.linspace(0.0, Y - 1, Y, device=torch.device(device)) + grid_y = torch.reshape(grid_y, [1, Y, 1]) + grid_y = grid_y.repeat(B, 1, X) + + grid_x = torch.linspace(0.0, X - 1, X, device=torch.device(device)) + grid_x = torch.reshape(grid_x, [1, 1, X]) + grid_x = grid_x.repeat(B, Y, 1) + + if stack: + # note we stack in xy order + # (see https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample) + grid = torch.stack([grid_x, grid_y], dim=-1) + return grid + else: + return grid_y, grid_x + + +def reduce_masked_mean(x, mask, dim=None, keepdim=False): + # x and mask are the same shape, or at least broadcastably so < actually it's safer if you disallow broadcasting + # returns shape-1 + # axis can be a list of axes + for (a, b) in zip(x.size(), mask.size()): + assert a == b # some shape mismatch! + prod = x * mask + if dim is None: + numer = torch.sum(prod) + denom = EPS + torch.sum(mask) + else: + numer = torch.sum(prod, dim=dim, keepdim=keepdim) + denom = EPS + torch.sum(mask, dim=dim, keepdim=keepdim) + + mean = numer / denom + return mean + + +def bilinear_sample2d(im, x, y, return_inbounds=False): + # x and y are each B, N + # output is B, C, N + if len(im.shape) == 5: + B, N, C, H, W = list(im.shape) + else: + B, C, H, W = list(im.shape) + N = list(x.shape)[1] + + x = x.float() + y = y.float() + H_f = torch.tensor(H, dtype=torch.float32) + W_f = torch.tensor(W, dtype=torch.float32) + + # inbound_mask = (x>-0.5).float()*(y>-0.5).float()*(x -0.5).byte() & (x < float(W_f - 0.5)).byte() + y_valid = (y > -0.5).byte() & (y < float(H_f - 0.5)).byte() + inbounds = (x_valid & y_valid).float() + inbounds = inbounds.reshape( + B, N + ) # something seems wrong here for B>1; i'm getting an error here (or downstream if i put -1) + return output, inbounds + + return output # B, C, N diff --git a/cotracker/models/evaluation_predictor.py b/cotracker/models/evaluation_predictor.py new file mode 100644 index 0000000..b50233a --- /dev/null +++ b/cotracker/models/evaluation_predictor.py @@ -0,0 +1,103 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
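
# --- Illustrative usage sketch (annotation, not part of the commit above) ---
# reduce_masked_mean from cotracker/models/core/model_utils.py averages x over
# the entries where mask is 1; EPS keeps the division finite when the mask is
# empty. bilinear_sample2d (shown truncated above) samples image values at
# sub-pixel (x, y) locations and returns a B x C x N tensor. The tensor values
# below are made up purely for this example.
import torch
from cotracker.models.core.model_utils import reduce_masked_mean

x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
mask = torch.tensor([[1.0, 1.0, 0.0, 0.0]])

print(reduce_masked_mean(x, mask))         # ~1.5: only the first two entries count
print(reduce_masked_mean(x, mask, dim=1))  # per-row masked mean, shape (1,)
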
+ +import torch +import torch.nn.functional as F +from typing import Tuple + +from cotracker.models.core.cotracker.cotracker import CoTracker, get_points_on_a_grid + + +class EvaluationPredictor(torch.nn.Module): + def __init__( + self, + cotracker_model: CoTracker, + interp_shape: Tuple[int, int] = (384, 512), + grid_size: int = 6, + local_grid_size: int = 6, + single_point: bool = True, + n_iters: int = 6, + ) -> None: + super(EvaluationPredictor, self).__init__() + self.grid_size = grid_size + self.local_grid_size = local_grid_size + self.single_point = single_point + self.interp_shape = interp_shape + self.n_iters = n_iters + + self.model = cotracker_model + self.model.to("cuda") + self.model.eval() + + def forward(self, video, queries): + queries = queries.clone().cuda() + B, T, C, H, W = video.shape + B, N, D = queries.shape + + assert D == 3 + assert B == 1 + + rgbs = video.reshape(B * T, C, H, W) + rgbs = F.interpolate(rgbs, tuple(self.interp_shape), mode="bilinear") + rgbs = rgbs.reshape(B, T, 3, self.interp_shape[0], self.interp_shape[1]).cuda() + + queries[:, :, 1] *= self.interp_shape[1] / W + queries[:, :, 2] *= self.interp_shape[0] / H + + if self.single_point: + traj_e = torch.zeros((B, T, N, 2)).cuda() + vis_e = torch.zeros((B, T, N)).cuda() + for pind in range((N)): + query = queries[:, pind : pind + 1] + + t = query[0, 0, 0].long() + + traj_e_pind, vis_e_pind = self._process_one_point(rgbs, query) + traj_e[:, t:, pind : pind + 1] = traj_e_pind[:, :, :1] + vis_e[:, t:, pind : pind + 1] = vis_e_pind[:, :, :1] + else: + if self.grid_size > 0: + xy = get_points_on_a_grid(self.grid_size, rgbs.shape[3:]) + xy = torch.cat([torch.zeros_like(xy[:, :, :1]), xy], dim=2).cuda() # + queries = torch.cat([queries, xy], dim=1) # + + traj_e, __, vis_e, __ = self.model( + rgbs=rgbs, + queries=queries, + iters=self.n_iters, + ) + + traj_e[:, :, :, 0] *= W / float(self.interp_shape[1]) + traj_e[:, :, :, 1] *= H / float(self.interp_shape[0]) + return traj_e, vis_e + + def _process_one_point(self, rgbs, query): + t = query[0, 0, 0].long() + + device = rgbs.device + if self.local_grid_size > 0: + xy_target = get_points_on_a_grid( + self.local_grid_size, + (50, 50), + [query[0, 0, 2], query[0, 0, 1]], + ) + + xy_target = torch.cat( + [torch.zeros_like(xy_target[:, :, :1]), xy_target], dim=2 + ) # + query = torch.cat([query, xy_target], dim=1).to(device) # + + if self.grid_size > 0: + xy = get_points_on_a_grid(self.grid_size, rgbs.shape[3:]) + xy = torch.cat([torch.zeros_like(xy[:, :, :1]), xy], dim=2).cuda() # + query = torch.cat([query, xy], dim=1).to(device) # + # crop the video to start from the queried frame + query[0, 0, 0] = 0 + traj_e_pind, __, vis_e_pind, __ = self.model( + rgbs=rgbs[:, t:], queries=query, iters=self.n_iters + ) + + return traj_e_pind, vis_e_pind diff --git a/cotracker/predictor.py b/cotracker/predictor.py new file mode 100644 index 0000000..48f3337 --- /dev/null +++ b/cotracker/predictor.py @@ -0,0 +1,178 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
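
# --- Illustrative usage sketch (annotation, not part of the commit above) ---
# EvaluationPredictor wraps a CoTracker model for benchmarking: it resizes the
# video to interp_shape, rescales the (t, x, y) queries accordingly, and maps
# the predicted tracks back to the original resolution. The checkpoint path is
# borrowed from demo.py and a CUDA device is assumed, since the class moves the
# model and video to "cuda". Tensor sizes here are placeholders.
import torch
from cotracker.models.build_cotracker import build_cotracker
from cotracker.models.evaluation_predictor import EvaluationPredictor

cotracker = build_cotracker("./checkpoints/cotracker_stride_4_wind_8.pth")
predictor = EvaluationPredictor(cotracker, interp_shape=(384, 512), single_point=True)

video = torch.rand(1, 8, 3, 240, 320)           # (B, T, C, H, W); B must be 1
queries = torch.tensor([[[0.0, 100.0, 50.0]]])  # (B, N, 3) rows of (t, x, y)
tracks, visibility = predictor(video, queries)  # (1, T, N, 2), (1, T, N)
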
+ +import torch +import torch.nn.functional as F + +from tqdm import tqdm +from cotracker.models.core.cotracker.cotracker import get_points_on_a_grid +from cotracker.models.core.model_utils import smart_cat +from cotracker.models.build_cotracker import ( + build_cotracker, +) + + +class CoTrackerPredictor(torch.nn.Module): + def __init__( + self, checkpoint="cotracker/checkpoints/cotracker_stride_4_wind_8.pth" + ): + super().__init__() + self.interp_shape = (384, 512) + self.support_grid_size = 6 + model = build_cotracker(checkpoint) + + self.model = model + self.model.to("cuda") + self.model.eval() + + @torch.no_grad() + def forward( + self, + video, # (1, T, 3, H, W) + # input prompt types: + # - None. Dense tracks are computed in this case. You can adjust *query_frame* to compute tracks starting from a specific frame. + # *backward_tracking=True* will compute tracks in both directions. + # - queries. Queried points of shape (1, N, 3) in format (t, x, y) for frame index and pixel coordinates. + # - grid_size. Grid of N*N points from the first frame. if segm_mask is provided, then computed only for the mask. + # You can adjust *query_frame* and *backward_tracking* for the regular grid in the same way as for dense tracks. + queries: torch.Tensor = None, + segm_mask: torch.Tensor = None, # Segmentation mask of shape (B, 1, H, W) + grid_size: int = 0, + grid_query_frame: int = 0, # only for dense and regular grid tracks + backward_tracking: bool = False, + ): + + if queries is None and grid_size == 0: + tracks, visibilities = self._compute_dense_tracks( + video, + grid_query_frame=grid_query_frame, + backward_tracking=backward_tracking, + ) + else: + tracks, visibilities = self._compute_sparse_tracks( + video, + queries, + segm_mask, + grid_size, + add_support_grid=(grid_size == 0 or segm_mask is not None), + grid_query_frame=grid_query_frame, + backward_tracking=backward_tracking, + ) + + return tracks, visibilities + + def _compute_dense_tracks( + self, video, grid_query_frame, grid_size=50, backward_tracking=False + ): + *_, H, W = video.shape + grid_step = W // grid_size + grid_width = W // grid_step + grid_height = H // grid_step + tracks = visibilities = None + grid_pts = torch.zeros((1, grid_width * grid_height, 3)).to("cuda") + grid_pts[0, :, 0] = grid_query_frame + for offset in tqdm(range(grid_step * grid_step)): + ox = offset % grid_step + oy = offset // grid_step + grid_pts[0, :, 1] = ( + torch.arange(grid_width).repeat(grid_height) * grid_step + ox + ) + grid_pts[0, :, 2] = ( + torch.arange(grid_height).repeat_interleave(grid_width) * grid_step + oy + ) + tracks_step, visibilities_step = self._compute_sparse_tracks( + video=video, + queries=grid_pts, + backward_tracking=backward_tracking, + ) + tracks = smart_cat(tracks, tracks_step, dim=2) + visibilities = smart_cat(visibilities, visibilities_step, dim=2) + + return tracks, visibilities + + def _compute_sparse_tracks( + self, + video, + queries, + segm_mask=None, + grid_size=0, + add_support_grid=False, + grid_query_frame=0, + backward_tracking=False, + ): + B, T, C, H, W = video.shape + assert B == 1 + + video = video.reshape(B * T, C, H, W) + video = F.interpolate(video, tuple(self.interp_shape), mode="bilinear").cuda() + video = video.reshape( + B, T, 3, self.interp_shape[0], self.interp_shape[1] + ).cuda() + + if queries is not None: + queries = queries.clone() + B, N, D = queries.shape + assert D == 3 + queries[:, :, 1] *= self.interp_shape[1] / W + queries[:, :, 2] *= self.interp_shape[0] / H + elif grid_size > 0: + 
grid_pts = get_points_on_a_grid(grid_size, self.interp_shape) + if segm_mask is not None: + segm_mask = F.interpolate( + segm_mask, tuple(self.interp_shape), mode="nearest" + ) + point_mask = segm_mask[0, 0][ + (grid_pts[0, :, 1]).round().long().cpu(), + (grid_pts[0, :, 0]).round().long().cpu(), + ].bool() + grid_pts = grid_pts[:, point_mask] + + queries = torch.cat( + [torch.ones_like(grid_pts[:, :, :1]) * grid_query_frame, grid_pts], + dim=2, + ) + + if add_support_grid: + grid_pts = get_points_on_a_grid(self.support_grid_size, self.interp_shape) + grid_pts = torch.cat( + [torch.zeros_like(grid_pts[:, :, :1]), grid_pts], dim=2 + ) + queries = torch.cat([queries, grid_pts], dim=1) + + tracks, __, visibilities, __ = self.model(rgbs=video, queries=queries, iters=6) + + if backward_tracking: + tracks, visibilities = self._compute_backward_tracks( + video, queries, tracks, visibilities + ) + if add_support_grid: + queries[:, -self.support_grid_size ** 2 :, 0] = T - 1 + if add_support_grid: + tracks = tracks[:, :, : -self.support_grid_size ** 2] + visibilities = visibilities[:, :, : -self.support_grid_size ** 2] + thr = 0.9 + visibilities = visibilities > thr + tracks[:, :, :, 0] *= W / float(self.interp_shape[1]) + tracks[:, :, :, 1] *= H / float(self.interp_shape[0]) + return tracks, visibilities + + def _compute_backward_tracks(self, video, queries, tracks, visibilities): + inv_video = video.flip(1).clone() + inv_queries = queries.clone() + inv_queries[:, :, 0] = inv_video.shape[1] - inv_queries[:, :, 0] - 1 + + inv_tracks, __, inv_visibilities, __ = self.model( + rgbs=inv_video, queries=inv_queries, iters=6 + ) + + inv_tracks = inv_tracks.flip(1) + inv_visibilities = inv_visibilities.flip(1) + + mask = tracks == 0 + + tracks[mask] = inv_tracks[mask] + visibilities[mask[:, :, :, 0]] = inv_visibilities[mask[:, :, :, 0]] + return tracks, visibilities diff --git a/cotracker/utils/__init__.py b/cotracker/utils/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/cotracker/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/cotracker/utils/visualizer.py b/cotracker/utils/visualizer.py new file mode 100644 index 0000000..bfb0636 --- /dev/null +++ b/cotracker/utils/visualizer.py @@ -0,0 +1,291 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
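
# --- Illustrative usage sketch (annotation, not part of the commit above) ---
# The three prompt types described in the CoTrackerPredictor.forward comments:
# explicit (t, x, y) queries, a regular grid sampled on a chosen frame
# (optionally tracked backward as well), or no prompt at all for dense tracks.
# The checkpoint path and tensor sizes are assumptions taken from demo.py; a
# CUDA device is assumed because the predictor moves the model and video to "cuda".
import torch
from cotracker.predictor import CoTrackerPredictor

model = CoTrackerPredictor(checkpoint="./checkpoints/cotracker_stride_4_wind_8.pth")
video = torch.rand(1, 24, 3, 360, 640)  # (1, T, 3, H, W)

# 1) explicit queries of shape (1, N, 3), each row (frame index, x, y)
queries = torch.tensor([[[0.0, 320.0, 180.0], [10.0, 100.0, 50.0]]]).cuda()
tracks, visibility = model(video, queries=queries)

# 2) a grid_size x grid_size grid sampled on grid_query_frame, tracked both ways
tracks, visibility = model(video, grid_size=30, grid_query_frame=0, backward_tracking=True)

# 3) no prompt: dense tracks over the whole frame (slow; loops over grid offsets)
tracks, visibility = model(video)
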
+ +import os +import numpy as np +import cv2 +import torch +import flow_vis + +from matplotlib import cm +import torch.nn.functional as F +import torchvision.transforms as transforms +from moviepy.editor import ImageSequenceClip +from torch.utils.tensorboard import SummaryWriter +import matplotlib.pyplot as plt + + +class Visualizer: + def __init__( + self, + save_dir: str = "./results", + grayscale: bool = False, + pad_value: int = 0, + fps: int = 10, + mode: str = "rainbow", # 'cool', 'optical_flow' + linewidth: int = 2, + show_first_frame: int = 10, + tracks_leave_trace: int = 0, # -1 for infinite + ): + self.mode = mode + self.save_dir = save_dir + if mode == "rainbow": + self.color_map = cm.get_cmap("gist_rainbow") + elif mode == "cool": + self.color_map = cm.get_cmap(mode) + self.show_first_frame = show_first_frame + self.grayscale = grayscale + self.tracks_leave_trace = tracks_leave_trace + self.pad_value = pad_value + self.linewidth = linewidth + self.fps = fps + + def visualize( + self, + video: torch.Tensor, # (B,T,C,H,W) + tracks: torch.Tensor, # (B,T,N,2) + gt_tracks: torch.Tensor = None, # (B,T,N,2) + segm_mask: torch.Tensor = None, # (B,1,H,W) + filename: str = "video", + writer: SummaryWriter = None, + step: int = 0, + query_frame: int = 0, + save_video: bool = True, + compensate_for_camera_motion: bool = False, + ): + if compensate_for_camera_motion: + assert segm_mask is not None + if segm_mask is not None: + coords = tracks[0, query_frame].round().long() + segm_mask = segm_mask[0, query_frame][coords[:, 1], coords[:, 0]].long() + + video = F.pad( + video, + (self.pad_value, self.pad_value, self.pad_value, self.pad_value), + "constant", + 255, + ) + tracks = tracks + self.pad_value + + if self.grayscale: + transform = transforms.Grayscale() + video = transform(video) + video = video.repeat(1, 1, 3, 1, 1) + + res_video = self.draw_tracks_on_video( + video=video, + tracks=tracks, + segm_mask=segm_mask, + gt_tracks=gt_tracks, + query_frame=query_frame, + compensate_for_camera_motion=compensate_for_camera_motion, + ) + if save_video: + self.save_video(res_video, filename=filename, writer=writer, step=step) + return res_video + + def save_video(self, video, filename, writer=None, step=0): + if writer is not None: + writer.add_video( + f"{filename}_pred_track", + video.to(torch.uint8), + global_step=step, + fps=self.fps, + ) + else: + os.makedirs(self.save_dir, exist_ok=True) + wide_list = list(video.unbind(1)) + wide_list = [wide[0].permute(1, 2, 0).cpu().numpy() for wide in wide_list] + clip = ImageSequenceClip(wide_list[2:-1], fps=self.fps) + + # Write the video file + save_path = os.path.join(self.save_dir, f"{filename}_pred_track.mp4") + clip.write_videofile(save_path, codec="libx264", fps=self.fps, logger=None) + + print(f"Video saved to {save_path}") + + def draw_tracks_on_video( + self, + video: torch.Tensor, + tracks: torch.Tensor, + segm_mask: torch.Tensor = None, + gt_tracks=None, + query_frame: int = 0, + compensate_for_camera_motion=False, + ): + B, T, C, H, W = video.shape + _, _, N, D = tracks.shape + + assert D == 2 + assert C == 3 + video = video[0].permute(0, 2, 3, 1).byte().detach().cpu().numpy() # S, H, W, C + tracks = tracks[0].long().detach().cpu().numpy() # S, N, 2 + if gt_tracks is not None: + gt_tracks = gt_tracks[0].detach().cpu().numpy() + + res_video = [] + + # process input video + for rgb in video: + res_video.append(rgb.copy()) + + vector_colors = np.zeros((T, N, 3)) + if self.mode == "optical_flow": + vector_colors = flow_vis.flow_to_color(tracks 
- tracks[query_frame][None]) + elif segm_mask is None: + if self.mode == "rainbow": + y_min, y_max = ( + tracks[query_frame, :, 1].min(), + tracks[query_frame, :, 1].max(), + ) + norm = plt.Normalize(y_min, y_max) + for n in range(N): + color = self.color_map(norm(tracks[query_frame, n, 1])) + color = np.array(color[:3])[None] * 255 + vector_colors[:, n] = np.repeat(color, T, axis=0) + else: + # color changes with time + for t in range(T): + color = np.array(self.color_map(t / T)[:3])[None] * 255 + vector_colors[t] = np.repeat(color, N, axis=0) + else: + if self.mode == "rainbow": + vector_colors[:, segm_mask <= 0, :] = 255 + + y_min, y_max = ( + tracks[0, segm_mask > 0, 1].min(), + tracks[0, segm_mask > 0, 1].max(), + ) + norm = plt.Normalize(y_min, y_max) + for n in range(N): + if segm_mask[n] > 0: + color = self.color_map(norm(tracks[0, n, 1])) + color = np.array(color[:3])[None] * 255 + vector_colors[:, n] = np.repeat(color, T, axis=0) + + else: + # color changes with segm class + segm_mask = segm_mask.cpu() + color = np.zeros((segm_mask.shape[0], 3), dtype=np.float32) + color[segm_mask > 0] = np.array(self.color_map(1.0)[:3]) * 255.0 + color[segm_mask <= 0] = np.array(self.color_map(0.0)[:3]) * 255.0 + vector_colors = np.repeat(color[None], T, axis=0) + + # draw tracks + if self.tracks_leave_trace != 0: + for t in range(1, T): + first_ind = ( + max(0, t - self.tracks_leave_trace) + if self.tracks_leave_trace >= 0 + else 0 + ) + curr_tracks = tracks[first_ind : t + 1] + curr_colors = vector_colors[first_ind : t + 1] + if compensate_for_camera_motion: + diff = ( + tracks[first_ind : t + 1, segm_mask <= 0] + - tracks[t : t + 1, segm_mask <= 0] + ).mean(1)[:, None] + + curr_tracks = curr_tracks - diff + curr_tracks = curr_tracks[:, segm_mask > 0] + curr_colors = curr_colors[:, segm_mask > 0] + + res_video[t] = self._draw_pred_tracks( + res_video[t], + curr_tracks, + curr_colors, + ) + if gt_tracks is not None: + res_video[t] = self._draw_gt_tracks( + res_video[t], gt_tracks[first_ind : t + 1] + ) + + # draw points + for t in range(T): + for i in range(N): + coord = (tracks[t, i, 0], tracks[t, i, 1]) + if coord[0] != 0 and coord[1] != 0: + if not compensate_for_camera_motion or ( + compensate_for_camera_motion and segm_mask[i] > 0 + ): + cv2.circle( + res_video[t], + coord, + int(self.linewidth * 2), + vector_colors[t, i].tolist(), + -1, + ) + + # construct the final rgb sequence + if self.show_first_frame > 0: + res_video = [res_video[0]] * self.show_first_frame + res_video[1:] + return torch.from_numpy(np.stack(res_video)).permute(0, 3, 1, 2)[None].byte() + + def _draw_pred_tracks( + self, + rgb: np.ndarray, # H x W x 3 + tracks: np.ndarray, # T x 2 + vector_colors: np.ndarray, + alpha: float = 0.5, + ): + T, N, _ = tracks.shape + + for s in range(T - 1): + vector_color = vector_colors[s] + original = rgb.copy() + alpha = (s / T) ** 2 + for i in range(N): + coord_y = (int(tracks[s, i, 0]), int(tracks[s, i, 1])) + coord_x = (int(tracks[s + 1, i, 0]), int(tracks[s + 1, i, 1])) + if coord_y[0] != 0 and coord_y[1] != 0: + cv2.line( + rgb, + coord_y, + coord_x, + vector_color[i].tolist(), + self.linewidth, + cv2.LINE_AA, + ) + if self.tracks_leave_trace > 0: + rgb = cv2.addWeighted(rgb, alpha, original, 1 - alpha, 0) + return rgb + + def _draw_gt_tracks( + self, + rgb: np.ndarray, # H x W x 3, + gt_tracks: np.ndarray, # T x 2 + ): + T, N, _ = gt_tracks.shape + color = np.array((211.0, 0.0, 0.0)) + + for t in range(T): + for i in range(N): + gt_tracks = gt_tracks[t][i] + # draw a red cross + 
if gt_tracks[0] > 0 and gt_tracks[1] > 0: + length = self.linewidth * 3 + coord_y = (int(gt_tracks[0]) + length, int(gt_tracks[1]) + length) + coord_x = (int(gt_tracks[0]) - length, int(gt_tracks[1]) - length) + cv2.line( + rgb, + coord_y, + coord_x, + color, + self.linewidth, + cv2.LINE_AA, + ) + coord_y = (int(gt_tracks[0]) - length, int(gt_tracks[1]) + length) + coord_x = (int(gt_tracks[0]) + length, int(gt_tracks[1]) - length) + cv2.line( + rgb, + coord_y, + coord_x, + color, + self.linewidth, + cv2.LINE_AA, + ) + return rgb diff --git a/demo.py b/demo.py new file mode 100644 index 0000000..660ae01 --- /dev/null +++ b/demo.py @@ -0,0 +1,71 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import os +import torch +import argparse +import numpy as np + +from torchvision.io import read_video +from PIL import Image +from cotracker.utils.visualizer import Visualizer +from cotracker.predictor import CoTrackerPredictor + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--video_path", + default="./assets/apple.mp4", + help="path to a video", + ) + parser.add_argument( + "--mask_path", + default="./assets/apple_mask.png", + help="path to a segmentation mask", + ) + parser.add_argument( + "--checkpoint", + default="./checkpoints/cotracker_stride_4_wind_8.pth", + help="cotracker model", + ) + parser.add_argument("--grid_size", type=int, default=0, help="Regular grid size") + parser.add_argument( + "--grid_query_frame", + type=int, + default=0, + help="Compute dense and grid tracks starting from this frame ", + ) + + parser.add_argument( + "--backward_tracking", + action="store_true", + help="Compute tracks in both directions, not only forward", + ) + + args = parser.parse_args() + + # load the input video frame by frame + video = read_video(args.video_path) + video = video[0].permute(0, 3, 1, 2)[None].float() + segm_mask = np.array(Image.open(os.path.join(args.mask_path))) + segm_mask = torch.from_numpy(segm_mask)[None, None] + + model = CoTrackerPredictor(checkpoint=args.checkpoint) + + pred_tracks, pred_visibility = model( + video, + grid_size=args.grid_size, + grid_query_frame=args.grid_query_frame, + backward_tracking=args.backward_tracking, + # segm_mask=segm_mask + ) + print("computed") + + # save a video with predicted tracks + seq_name = args.video_path.split("/")[-1] + vis = Visualizer(save_dir="./saved_videos", pad_value=120, linewidth=3) + vis.visualize(video, pred_tracks, query_frame=args.grid_query_frame) diff --git a/notebooks/demo.ipynb b/notebooks/demo.ipynb new file mode 100644 index 0000000..1741e5f --- /dev/null +++ b/notebooks/demo.ipynb @@ -0,0 +1,924 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "60a7e08e-93c6-4370-9778-3bb102dce78b", + "metadata": {}, + "source": [ + "Copyright (c) Meta Platforms, Inc. and affiliates." + ] + }, + { + "cell_type": "markdown", + "id": "3081cd8f-f6f9-4a1a-8c36-8a857b0c3b03", + "metadata": {}, + "source": [ + "\n", + " \"Open\n", + "" + ] + }, + { + "cell_type": "markdown", + "id": "f9f3240f-0354-4802-b8b5-9070930fc957", + "metadata": {}, + "source": [ + "# CoTracker: It is Better to Track Together\n", + "This is a demo for CoTracker, a model that can track any point in a video." 
+ ] + }, + { + "cell_type": "markdown", + "id": "36ff1fd0-572e-47fb-8221-1e73ac17cfd1", + "metadata": {}, + "source": [ + "\"Logo\"" + ] + }, + { + "cell_type": "markdown", + "id": "6757bfa3-d663-4a54-9722-3e1a7da3307c", + "metadata": {}, + "source": [ + "Let's install dependencies:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "1745a859-71d4-4ec3-8ef3-027cabe786d4", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import cv2\n", + "import torch\n", + "\n", + "from torchvision.io import read_video\n", + "from cotracker.utils.visualizer import Visualizer\n", + "from IPython.display import HTML" + ] + }, + { + "cell_type": "markdown", + "id": "7894bd2d-2099-46fa-8286-f0c56298ecd1", + "metadata": {}, + "source": [ + "Read a video from CO3D:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "f1f9ca4d-951e-49d2-8844-91f7bcadfecd", + "metadata": {}, + "outputs": [], + "source": [ + "video = read_video('../assets/apple.mp4')[0]\n", + "video = video.permute(0, 3, 1, 2)[None].float()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "fb4c2e9d-0e85-4c10-81a2-827d0759bf87", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "HTML(\"\"\"\"\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "6f89ae18-54d0-4384-8a79-ca9247f5f31a", + "metadata": {}, + "source": [ + "Import CoTrackerPredictor and create an instance of it. We'll use this object to estimate tracks:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "d59ac40b-bde8-46d4-bd57-4ead939f22ca", + "metadata": {}, + "outputs": [], + "source": [ + "from cotracker.predictor import CoTrackerPredictor\n", + "\n", + "model = CoTrackerPredictor(\n", + " checkpoint=os.path.join(\n", + " '../checkpoints/cotracker_stride_4_wind_8.pth'\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e8398155-6dae-4ff0-95f3-dbb52ac70d20", + "metadata": {}, + "source": [ + "Track points sampled on a regular grid of size 30\\*30 on the first frame:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "17fcaae9-7b3c-474c-977a-cce08a09d580", + "metadata": {}, + "outputs": [], + "source": [ + "pred_tracks, pred_visibility = model(video, grid_size=30)" + ] + }, + { + "cell_type": "markdown", + "id": "50a58521-a9ba-4f8b-be02-cfdaf79613a2", + "metadata": {}, + "source": [ + "Visualize and save the result: " + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "7e793ce0-7b77-46ca-a629-155a6a146000", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Video saved to ./videos/teaser_pred_track.mp4\n" + ] + } + ], + "source": [ + "vis = Visualizer(save_dir='./videos', pad_value=100)\n", + "vis.visualize(video=video, tracks=pred_tracks, filename='teaser');" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "2d0733ba-8fe1-4cd4-b963-2085202fba13", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "HTML(\"\"\"\"\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "73d88a5f-057c-4b9f-828d-ee0b97d1e72f", + "metadata": {}, + "source": [ + "## Tracking manually selected points" + ] + }, + { + "cell_type": "markdown", + "id": "a75bca85-b872-4f4e-be19-ff16f0984037", + 
"metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "We will start by tracking points queried manually.\n", + "We define a queried point as: [time, x coord, y coord] \n", + "\n", + "So, the code below defines points with different x and y coordinates sampled on frames 0, 10, 20, and 30:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "c6422e7c-8c6f-4269-92c3-245344afe35b", + "metadata": {}, + "outputs": [], + "source": [ + "queries = torch.tensor([\n", + " [0., 400., 350.], # point tracked from the first frame\n", + " [10., 600., 500.], # frame number 10\n", + " [20., 750., 600.], # ...\n", + " [30., 900., 200.]\n", + "]).cuda()" + ] + }, + { + "cell_type": "markdown", + "id": "13697a2a-7304-4d18-93be-bfbebf3dc12a", + "metadata": {}, + "source": [ + "That's what our queried points look like:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "d7141079-d7e0-40b3-b031-a28879c4bd6d", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAoEAAAHVCAYAAACOpCHEAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAABHHElEQVR4nO3de3hU5b33/88kIUMwTEKCOUGCQTkUQYsgMSpYH/OAlHooiGxKlaIVtcFCUR6MW6HSrbC11lMV7b7cYDcqigWtbLRXBESt4SgoJyO4gUTMQcFMQMn5+/uDX9ZmTERCDpPJer+ua10k933PzH27kq+frFlrjcfMTAAAAHCVsGBPAAAAAG2PEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEA0y+LFi+XxeBrd7r777mBPr8VUVlZq9uzZSklJUVRUlDIyMpSbmxvsaQFoIW6oZUePHtXcuXN15ZVXKi4uTh6PR4sXL/7e8bt379aVV16p6OhoxcXF6YYbbtCXX37ZdhNGq4sI9gTQMcybN0/p6ekBbQMHDgzSbFrer371K7366quaMWOG+vTpo8WLF+unP/2p1q5dq0svvTTY0wPQQjpyLfvqq680b948paWl6fzzz9c777zzvWM///xzjRgxQjExMXrwwQd19OhR/fGPf9T27du1ceNGRUZGtt3E0WoIgWgRo0eP1tChQ09pbEVFhSIjIxUWFhoHojdu3KilS5fq4Ycf1l133SVJuvHGGzVw4ED9v//3//TBBx8EeYYAWkpHrmXJyckqKipSUlKSNm/erAsvvPB7xz744IP65ptvtGXLFqWlpUmShg0bpv/7f/+vFi9erKlTp7bVtNGKQuMnFyHrnXfekcfj0dKlS3XvvfeqR48e6tKli8rLy3X48GHdddddGjRokKKjo+Xz+TR69Gh99NFHjT7HK6+8ovvvv189evRQ165ddd1118nv96uyslIzZsxQQkKCoqOjNWXKFFVWVjaYy5IlSzRkyBBFRUUpLi5O//Iv/6LCwsIfXMOrr76q8PDwgKLXuXNn3XzzzcrLyzul5wAQ2jpCLfN6vUpKSjql9f7tb3/Tz372MycASlJWVpb69u2rV1555ZSeA+0fRwLRIvx+v7766quAtu7duztf/+EPf1BkZKTuuusuVVZWKjIyUrt27dJrr72m8ePHKz09XSUlJXr22Wd12WWXadeuXUpJSQl4vvnz5ysqKkp333239u7dqyeffFKdOnVSWFiYvv76a/3+97/X+vXrtXjxYqWnp2vOnDnOYx944AHdd999uv766/XrX/9aX375pZ588kmNGDFCW7duVWxs7PeubevWrerbt698Pl9A+7BhwyRJ27ZtU2pq6un+pwPQjnTkWnaqDh48qNLS0kaPiA4bNkyrVq1q9mugnTCgGRYtWmSSGt3MzNauXWuSrHfv3vbtt98GPLaiosJqa2sD2vbt22der9fmzZvntNU/x8CBA62qqsppnzhxonk8Hhs9enTAc2RmZlqvXr2c7/fv32/h4eH2wAMPBIzbvn27RURENGj/rnPPPdf+z//5Pw3ad+7caZLsmWeeOenjAbR/bqhlJ9q0aZNJskWLFn1v31//+tcGfbNmzTJJVlFRccqvhfaLt4PRIp566inl5uYGbCeaPHmyoqKiAtq8Xq9zLk1tba0OHTqk6Oho9evXTx9++GGD17jxxhvVqVMn5/uMjAyZmW666aaAcRkZGSosLFRNTY0kafny5aqrq9P111+vr776ytmSkpLUp08frV279qRrO3bsmLxeb4P2zp07O/0AOoaOXMtOVX1No+51fLwdjBYxbNiwk55M/d2r7SSprq5Ojz/+uJ5++mnt27dPtbW1Tl98fHyD8SeemyJJMTExktTgrdiYmBjV1dXJ7/crPj5ee/bskZmpT58+jc7txGLcmKioqEbPy6moqHD6AXQMHbmWnar6mkbd6/gIgWgTjRWMBx98UPfdd59uuukm/eEPf1BcXJzCwsI0Y8YM1dXVNRgfHh7e6HN/X7uZSTpeoD0ej958881Gx0ZHR5907snJyTp48GCD9qKiIklqcL4PgI4rlGvZqUpOTpb0vzXuREVFRYqLi2v0KCFCDyEQQfPqq6/q8ssv13PPPRfQXlZWFnAidnOdffbZMjOlp6erb9++TX78j3/8Y61du1bl5eUBF4ds2LDB6QfgXqFSy05Vjx49dOaZZ2rz5s0N+jZu3EjN60A4JxBBEx4e7vyFW2/ZsmWNHnVrjrFjxyo8PFz3339/g9czMx06dOikj7/uuutUW1urv/zlL05bZWWlFi1apIyMDK4MBlwuVGpZU4wbN04rV64MuPXM6tWr9emnn2r8+PEt9joILo4EImh+9rOfad68eZoyZYouvvhibd++XS+88IJ69+7doq9z9tln69/+7d+Uk5Oj/fv369prr1XXrl21b98+rVixQlOnTnVuAt2YjIwMjR8/Xj
k5OSotLdU555yj559/Xvv372/wlz8A9wmVWiZJf/7zn1VWVqYvvvhCkvTGG2/o888/lyTdcccdzvmJ99xzj5YtW6bLL79c06dP19GjR/Xwww9r0KBBmjJlSouuC8FDCETQ3HPPPfrmm2/04osv6uWXX9YFF1yg//7v/26Vz+m8++671bdvXz366KO6//77JR0/CXvkyJG6+uqrf/Dxf/3rX3Xffffpv/7rv/T111/rvPPO08qVKzVixIgWnyuA0BJKteyPf/yjDhw44Hy/fPlyLV++XJL0y1/+MuAilXXr1mnmzJm6++67FRkZqTFjxuiRRx7hfMAOxGPfPaYMAACADo9zAgEAAFyIEAgAAOBChEAAAAAXCloIfOqpp3TWWWepc+fOysjI0MaNG4M1FQBocdQ4AO1dUELgyy+/rJkzZ2ru3Ln68MMPdf7552vUqFEqLS0NxnQAoEVR4wCEgqBcHZyRkaELL7xQf/7znyUd/yic1NRU3XHHHY1eUl9ZWRnwGYZ1dXU6fPiw4uPj5fF42mzeANovM9ORI0eUkpKisLDgnulCjQPQklqtvlkbq6ystPDwcFuxYkVA+4033mhXX311o4+ZO3euSWJjY2P7wa2wsLANKtn3o8axsbG11tbS9a3Nbxb91Vdfqba2VomJiQHtiYmJ+uSTTxp9TE5OjmbOnOl87/f7lZaWpsLCwoDPcgXgXuXl5UpNTVXXrl2DOg9qHICW1lr1LSQ+McTr9TZ6h3Kfz0eBBBAgFN8+pcYBOBUtXd/a/MSZ7t27Kzw8XCUlJQHtJSUlSkpKauvpAECLosYBCBVtHgIjIyM1ZMgQrV692mmrq6vT6tWrlZmZ2dbTAYAWRY0DECqC8nbwzJkzNXnyZA0dOlTDhg3TY489pm+++UZTpkwJxnQAoEVR4wCEgqCEwAkTJujLL7/UnDlzVFxcrB//+Md66623GpxIDQChiBoHIBQE5T6BzVVeXq6YmBj5/X5OmgYgqWPVhY60FgDN11o1gc8OBgAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXalIInD9/vi688EJ17dpVCQkJuvbaa5Wfnx8wpqKiQtnZ2YqPj1d0dLTGjRunkpKSgDEFBQUaM2aMunTpooSEBM2aNUs1NTXNXw0ANAM1DoCbNCkErlu3TtnZ2Vq/fr1yc3NVXV2tkSNH6ptvvnHG/O53v9Mbb7yhZcuWad26dfriiy80duxYp7+2tlZjxoxRVVWVPvjgAz3//PNavHix5syZ03KrAoDTQI0D4CrWDKWlpSbJ1q1bZ2ZmZWVl1qlTJ1u2bJkzZvfu3SbJ8vLyzMxs1apVFhYWZsXFxc6YhQsXms/ns8rKykZfp6Kiwvx+v7MVFhaaJPP7/c2ZPoAOxO/3t3hdoMYBaA9ao76ZmTXrnEC/3y9JiouLkyRt2bJF1dXVysrKcsb0799faWlpysvLkyTl5eVp0KBBSkxMdMaMGjVK5eXl2rlzZ6OvM3/+fMXExDhbampqc6YNAKeEGgegIzvtEFhXV6cZM2bokksu0cCBAyVJxcXFioyMVGxsbMDYxMREFRcXO2NOLI71/fV9jcnJyZHf73e2wsLC0502AJwSahyAji7idB+YnZ2tHTt26P3332/J+TTK6/XK6/W2+usAQD1qHICO7rSOBE6bNk0rV67U2rVr1bNnT6c9KSlJVVVVKisrCxhfUlKipKQkZ8x3r6Sr/75+DAAEEzUOgBs0KQSamaZNm6YVK1ZozZo1Sk9PD+gfMmSIOnXqpNWrVztt+fn5KigoUGZmpiQpMzNT27dvV2lpqTMmNzdXPp9PAwYMaM5aAKBZqHEA3KRJbwdnZ2frxRdf1Ouvv66uXbs657fExMQoKipKMTExuvnmmzVz5kzFxcXJ5/PpjjvuUGZmpi666CJJ0siRIzVgwADdcMMNeuihh1RcXKx7771X2dnZvB0CIKiocQBcpSmXEktqdFu0aJEz5tixY/ab3/zGunXrZl26dLGf//znVlRUFPA8+/fvt9GjR1tUVJR1797d7rzzTquurj7lebTWpdIAQldL1AVqHID2qLVqgsfMrO2jZ/OUl5crJiZGfr9fPp8v2NMB0A50pLrQkdYCoPlaqybw2cEAAAAuRAgEAABwIUIgAACACxECAQAAXIgQCAAA4EKEQAAAABciBAIAALgQIRAAAMCFCIEAAAAuRAgEAABwIUIgAACACxECAQAAXIgQCAAA4EKEQAAAABciBAIAALgQIRAAAMCFCIEAAAAuRAgEAABwIUIgAACACxECAQAAXIgQCAAA4EKEQAAAABciBAIAALhQRLAngCCprZXee08qKpKSk6Xhw6Xw8GDPCgAAtBFCoBstXy5Nny59/vn/tvXsKT3+uDR2bPDmBQAA2gxvB7vN8uXSddcFBkBJOnjwePvy5cGZFwAAaFOEQDeprT1+BNCsYV9924wZx8cBAIAOjRDoJu+91/AI4InMpMLC4+MAAECHRgh0k6Kilh0HAABCFiHQTZKTW3YcAAAIWYRANxk+/PhVwB5P4/0ej5SaenwcAADo0AiBbhIefvw2MFLDIFj//WOPcb9AAABcgBDoNmPHSq++KvXoEdjes+fxdu4TCACAK3CzaDcaO1a65ho+MQQAABcjBLpVeLj0k58EexYAACBIeDsYAADAhZoVAhcsWCCPx6MZM2Y4bRUVFcrOzlZ8fLyio6M1btw4lZSUBDyuoKBAY8aMUZcuXZSQkKBZs2appqamOVMBgBZFfQPQ0Z12CNy0aZOeffZZnXfeeQHtv/vd7/TGG29o2bJlWrdunb744guNPeFig9raWo0ZM0ZVVVX64IMP9Pzzz2vx4sWaM2fO6a8CAFoQ9Q2AK9hpOHLkiPXp08dyc3Ptsssus+nTp5uZWVlZmXXq1MmWLVvmjN29e7dJsry8PDMzW7VqlYWFhVlxcbEzZuHChebz+ayysvKUXt/v95sk8/v9pzN9AB1QS9WFYNe3llwLgI6htWrCaR0JzM7O1pgxY5SVlRXQvmXLFlVXVwe09+/fX2lpacrLy5Mk5eXladCgQUpMTHTGjBo1SuXl5dq5c2ejr1dZWany8vKADQBaQ1vXN4kaByA4mnx18NKlS/Xhhx9q06ZNDfqKi4sVGRmp2NjYgPbExEQVFxc7Y04skPX99X2NmT9/vu6///6mThUAmiQY9U2ixgEIjiYdCSwsLNT06dP1wgsvqHPnzq01pwZycnLk9/udrbCwsM1eG4A7BKu+SdQ4AMHRpBC4ZcsWl
ZaW6oILLlBERIQiIiK0bt06PfHEE4qIiFBiYqKqqqpUVlYW8LiSkhIlJSVJkpKSkhpcTVf/ff2Y7/J6vfL5fAEbALSkYNU3iRoHIDiaFAKvuOIKbd++Xdu2bXO2oUOHatKkSc7XnTp10urVq53H5Ofnq6CgQJmZmZKkzMxMbd++XaWlpc6Y3Nxc+Xw+DRgwoIWWBQBNQ30D4DZNOiewa9euGjhwYEDbGWecofj4eKf95ptv1syZMxUXFyefz6c77rhDmZmZuuiiiyRJI0eO1IABA3TDDTfooYceUnFxse69915lZ2fL6/W20LIAoGmobwDcpsU/Nu7RRx9VWFiYxo0bp8rKSo0aNUpPP/200x8eHq6VK1fq9ttvV2Zmps444wxNnjxZ8+bNa+mpAECLor4B6Eg8ZmbBnkRTlZeXKyYmRn6/n3NnAEjqWHWhI60FQPO1Vk3gs4MBAABciBAIAADgQoRAAAAAFyIEAgAAuBAhEAAAwIUIgQAAAC5ECAQAAHAhQiAAAIALEQIBAABciBAIAADgQoRAAAAAFyIEAgAAuFBEsCcAAIDr1NZK770nFRVJycnS8OFSeHiwZwWXIQQCANCWli+Xpk+XPv/8f9t69pQef1waOzZ484Lr8HYwAABtZfly6brrAgOgJB08eLx9+fLgzAuuRAgEAKAt1NYePwJo1rCvvm3GjOPjgDZACAQAoC28917DI4AnMpMKC4+PA9oAIRAAgLZQVNSy44BmIgQCANAWkpNbdhzQTIRAAADawvDhx68C9nga7/d4pNTU4+OANkAIBACgLYSHH78NjNQwCNZ//9hj3C8QbYYQCABAWxk7Vnr1ValHj8D2nj2Pt3OfQLQhbhYNAEBbGjtWuuYaPjEEQUcIBACgrYWHSz/5SbBnAZfj7WAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcKEmh8CDBw/ql7/8peLj4xUVFaVBgwZp8+bNTr+Zac6cOUpOTlZUVJSysrK0Z8+egOc4fPiwJk2aJJ/Pp9jYWN188806evRo81cDAM1AfQPgJk0KgV9//bUuueQSderUSW+++aZ27dqlRx55RN26dXPGPPTQQ3riiSf0zDPPaMOGDTrjjDM0atQoVVRUOGMmTZqknTt3Kjc3VytXrtS7776rqVOnttyqAKCJqG8AXMeaYPbs2XbppZd+b39dXZ0lJSXZww8/7LSVlZWZ1+u1l156yczMdu3aZZJs06ZNzpg333zTPB6PHTx4sNHnraioML/f72yFhYUmyfx+f1OmD6AD8/v9zaoLwapvZtQ4ACfX3Pr2fZp0JPDvf/+7hg4dqvHjxyshIUGDBw/Wf/zHfzj9+/btU3FxsbKyspy2mJgYZWRkKC8vT5KUl5en2NhYDR061BmTlZWlsLAwbdiwodHXnT9/vmJiYpwtNTW1KdMGgB8UrPomUeMABEeTQuD//M//aOHCherTp4/+8Y9/6Pbbb9dvf/tbPf/885Kk4uJiSVJiYmLA4xITE52+4uJiJSQkBPRHREQoLi7OGfNdOTk58vv9zlZYWNiUaQPADwpWfZOocQCCI6Ipg+vq6jR06FA9+OCDkqTBgwdrx44deuaZZzR58uRWmaAkeb1eeb3eVnt+AAhWfZOocQCCo0lHApOTkzVgwICAth/96EcqKCiQJCUlJUmSSkpKAsaUlJQ4fUlJSSotLQ3or6mp0eHDh50xANDWqG8A3KZJIfCSSy5Rfn5+QNunn36qXr16SZLS09OVlJSk1atXO/3l5eXasGGDMjMzJUmZmZkqKyvTli1bnDFr1qxRXV2dMjIyTnshANAc1DcArtOUq0g2btxoERER9sADD9iePXvshRdesC5dutiSJUucMQsWLLDY2Fh7/fXX7eOPP7ZrrrnG0tPT7dixY86YK6+80gYPHmwbNmyw999/3/r06WMTJ0485Xm01lUyAEJXc+tCe6lvLbEWAB1La9WEJoVAM7M33njDBg4caF6v1/r3729/+ctfAvrr6ursvvvus8TERPN6vXbFFVdYfn5+wJhDhw7ZxIkTLTo62nw+n02ZMsWOHDlyynOgQAL4rpaoC+2hvrXUWgB0HK1VEzxmZsE7Dnl6ysvLFRMTI7/fL5/PF+zpAGgHOlJd6EhrAdB8rVUT+OxgAAAAFyIEAgAAuBAhEAAAwIUIgQAAAC5ECAQAAHAhQiAAAIALEQIBAABciBAIAADgQoRAAAAAFyIEAgAAuBAhEAAAwIUIgQAAAC5ECAQAAHAhQiAAAIALEQIBAABciBAIAADgQoRAAAAAFyIEAgAAuBAhEAAAwIUIgQAAAC4UEewJnA4zkySVl5cHeSYA2ov6elBfH0IZNQ7AiVqrvoVkCDx06JAkKTU1NcgzAdDeHDlyRDExMcGeRrNQ4wA0pqXrW0iGwLi4OElSQUFByBf78vJypaamqrCwUD6fL9jTaRbW0j65ZS1mpiNHjiglJSVIs2s51Lj2ibW0Px1lHVJw6ltIhsCwsOOnMsbExIT8Tq/n8/lYSzvEWtqn71tLqAemetS49o21tD8dZR1S29Y3LgwBAABwIUIgAACAC4VkCPR6vZo7d668Xm+wp9JsrKV9Yi3tU0day8l0pHWylvapo6ylo6xDCs5aPNYR7qcAAACAJgnJI4EAAABoHkIgAACACxECAQAAXIgQCAAA4EIhGQKfeuopnXXWWercubMyMjK0cePGYE8pwPz583XhhReqa9euSkhI0LXXXqv8/PyAMT/5yU/k8XgCtttuuy1gTEFBgcaMGaMuXbooISFBs2bNUk1NTVsuRb///e8bzLN///5Of0VFhbKzsxUfH6/o6GiNGzdOJSUl7W4dknTWWWc1WIvH41F2drak9r1P3n33XV111VVKSUmRx+PRa6+9FtBvZpozZ46Sk5MVFRWlrKws7dmzJ2DM4cOHNWnSJPl8PsXGxurmm2/W0aNHA8Z8/PHHGj58uDp37qzU1FQ99NBDbbqW6upqzZ49W4MGDdIZZ5yhlJQU3Xjjjfriiy8CnqOxfblgwYI2X0troL61nY5U36TQrXHUtyDWNwsxS5cutcjISPvP//xP27lzp91yyy0WGxtrJSUlwZ6aY9SoUbZo0SLbsWOHbdu2zX76059aWlqaHT161Blz2WWX2S233GJFRUXO5vf7nf6amhobOHCgZWVl2datW23VqlXWvXt3y8nJadO1zJ07184999yAeX755ZdO/2233Wapqam2evVq27x5s1100UV28cUXt7t1mJmVlpYGrCM3N9ck2dq1a82sfe+TVatW2b/+67/a8uXLTZKtWLEioH/BggUWExNjr732mn300Ud29dVXW3p6uh07dswZc+WVV9r5559v69evt/fee8/OOeccmzhxotPv9/stMTHRJk2aZDt27LCXXnrJoqKi7Nlnn22ztZSVlVlWVpa9/PLL9sknn1heXp4NGzbMhgwZEvAcvXr1snnz5gXsqxN/
v9pqLS2N+kZ9a45QrXHUt+DVt5ALgcOGDbPs7Gzn+9raWktJSbH58+cHcVYnV1paapJs3bp1Tttll11m06dP/97HrFq1ysLCwqy4uNhpW7hwofl8PqusrGzN6QaYO3eunX/++Y32lZWVWadOnWzZsmVO2+7du02S5eXlmVn7WUdjpk+fbmeffbbV1dWZWejsk+8Wlrq6OktKSrKHH37YaSsrKzOv12svvfSSmZnt2rXLJNmmTZucMW+++aZ5PB47ePCgmZk9/fTT1q1bt4C1zJ492/r169dma2nMxo0bTZIdOHDAaevVq5c9+uij3/uYYKylJVDfqG8tKRRrHPWtbetbSL0dXFVVpS1btigrK8tpCwsLU1ZWlvLy8oI4s5Pz+/2S/vdD4eu98MIL6t69uwYOHKicnBx9++23Tl9eXp4GDRqkxMREp23UqFEqLy/Xzp0722bi/789e/YoJSVFvXv31qRJk1RQUCBJ2rJli6qrqwP2R//+/ZWWlubsj/a0jhNVVVVpyZIluummm+TxeJz2UNknJ9q3b5+Ki4sD9kNMTIwyMjIC9kNsbKyGDh3qjMnKylJYWJg2bNjgjBkxYoQiIyOdMaNGjVJ+fr6+/vrrNlpNQ36/Xx6PR7GxsQHtCxYsUHx8vAYPHqyHH3444C2r9rqWk6G+Ud9aUkepcdS31q1vEc2afRv76quvVFtbG/ADKkmJiYn65JNPgjSrk6urq9OMGTN0ySWXaODAgU77L37xC/Xq1UspKSn6+OOPNXv2bOXn52v58uWSpOLi4kbXWd/XVjIyMrR48WL169dPRUVFuv/++zV8+HDt2LFDxcXFioyMbPDDm5iY6Myxvazju1577TWVlZXpV7/6ldMWKvvku+pfu7G5nbgfEhISAvojIiIUFxcXMCY9Pb3Bc9T3devWrVXmfzIVFRWaPXu2Jk6cGPCB6r/97W91wQUXKC4uTh988IFycnJUVFSkP/3pT85829tafgj1jfrWkjpKjaO+tW59C6kQGIqys7O1Y8cOvf/++wHtU6dOdb4eNGiQkpOTdcUVV+izzz7T2Wef3dbT/F6jR492vj7vvPOUkZGhXr166ZVXXlFUVFQQZ9Y8zz33nEaPHq2UlBSnLVT2iVtUV1fr+uuvl5lp4cKFAX0zZ850vj7vvPMUGRmpW2+9VfPnz+8QHx8VKqhv7Rc1rn1rL/UtpN4O7t69u8LDwxtcnVVSUqKkpKQgzer7TZs2TStXrtTatWvVs2fPk47NyMiQJO3du1eSlJSU1Og66/uCJTY2Vn379tXevXuVlJSkqqoqlZWVBYw5cX+0x3UcOHBAb7/9tn7961+fdFyo7JP61z7Z70VSUpJKS0sD+mtqanT48OF2ua/qC+SBAweUm5sb8FdyYzIyMlRTU6P9+/dLal9rOVXUt+Dvn45Q36SOVeOob61b30IqBEZGRmrIkCFavXq101ZXV6fVq1crMzMziDMLZGaaNm2aVqxYoTVr1jQ4bNuYbdu2SZKSk5MlSZmZmdq+fXvAD3b9D8uAAQNaZd6n4ujRo/rss8+UnJysIUOGqFOnTgH7Iz8/XwUFBc7+aI/rWLRokRISEjRmzJiTjguVfZKenq6kpKSA/VBeXq4NGzYE7IeysjJt2bLFGbNmzRrV1dU5/yPIzMzUu+++q+rqamdMbm6u+vXr16ZvldQXyD179ujtt99WfHz8Dz5m27ZtCgsLc94Sai9raQrqW/B/lzpCfZM6Vo2jvrVyfWvypSRBtnTpUvN6vbZ48WLbtWuXTZ061WJjYwOuZgq222+/3WJiYuydd94JuMT722+/NTOzvXv32rx582zz5s22b98+e/3116137942YsQI5znqL9UfOXKkbdu2zd566y0788wz2/zWA3feeae98847tm/fPvvnP/9pWVlZ1r17dystLTWz47dQSEtLszVr1tjmzZstMzPTMjMz29066tXW1lpaWprNnj07oL2975MjR47Y1q1bbevWrSbJ/vSnP9nWrVudK8oWLFhgsbGx9vrrr9vHH39s11xzTaO3UBg8eLBt2LDB3n//fevTp0/ALRTKysosMTHRbrjhBtuxY4ctXbrUunTp0uK3UDjZWqqqquzqq6+2nj172rZt2wJ+f+qvhPvggw/s0UcftW3bttlnn31mS5YssTPPPNNuvPHGNl9LS6O+Ud+aKxRrHPUtePUt5EKgmdmTTz5paWlpFhkZacOGDbP169cHe0oBJDW6LVq0yMzMCgoKbMSIERYXF2der9fOOeccmzVrVsD9mszM9u/fb6NHj7aoqCjr3r273XnnnVZdXd2ma5kwYYIlJydbZGSk9ejRwyZMmGB79+51+o8dO2a/+c1vrFu3btalSxf7+c9/bkVFRe1uHfX+8Y9/mCTLz88PaG/v+2Tt2rWN/kxNnjzZzI7fRuG+++6zxMRE83q9dsUVVzRY46FDh2zixIkWHR1tPp/PpkyZYkeOHAkY89FHH9mll15qXq/XevToYQsWLGjTtezbt+97f3/q73W2ZcsWy8jIsJiYGOvcubP96Ec/sgcffNAqKirafC2tgfrWdjpafTMLzRpHfQteffOYmTXt2CEAAABCXUidEwgAAICWQQgEAABwIUIgAACACxECAQAAXIgQCAAA4EKEQAAAABciBAIAALgQIRAAAMCFCIEAAAAuRAgEAABwIUIgAACACxECAQAAXIgQCAAA4EKEQAAAABciBAIAALgQIRAAAMCFCIEAAAAuRAgEAABwIUIgTtnixYvl8Xga3e6+++5gT69FbNq0SdOmTdO5556rM844Q2lpabr++uv16aefNjp+9+7duvLKKxUdHa24uDjdcMMN+vLLL9t41gCayw31befOnRo/frx69+6tLl26qHv37hoxYoTeeOONRsdT3zq+iGBPAKFn3rx5Sk9PD2gbOHBgkGbTsv793/9d//znPzV+/Hidd955Ki4u1p///GddcMEFWr9+fcA6P//8c40YMUIxMTF68MEHdfToUf3xj3/U9u3btXHjRkVGRgZxJQBOR0eubwcOHNCRI0c0efJkpaSk6Ntvv9Xf/vY3XX311Xr22Wc1depUZyz1zSUMOEWLFi0ySbZp06ZTfsyxY8estra2FWfVsv75z39aZWVlQNunn35qXq/XJk2aFNB+++23W1RUlB04cMBpy83NNUn27LPPtsl8AbQMN9S3xtTU1Nj5559v/fr1C2invrkDbwejxbzzzjvyeDxaunSp7r33XvXo0UNdunRReXm5Dh8+rLvuukuDBg1SdHS0fD6fRo8erY8++qjR53jllVd0//33q0ePHuratauuu+46+f1+VVZWasaMGUpISFB0dLSmTJmiysrKBnNZsmSJhgwZoqioKMXFxelf/uVfVFhY+INruPjiixv8hdunTx+de+652r17d0D73/72N/3sZz9TWlqa05aVlaW+ffvqlVdeacp/OgDtXEeob40JDw9XamqqysrKAtqpb+7A28FoMr/fr6+++iqgrXv37s7Xf/jDHxQZGam77rpLlZWVioyM1K5du/Taa69
p/PjxSk9PV0lJiZ599llddtll2rVrl1JSUgKeb/78+YqKitLdd9+tvXv36sknn1SnTp0UFhamr7/+Wr///e+1fv16LV68WOnp6ZozZ47z2AceeED33Xefrr/+ev3617/Wl19+qSeffFIjRozQ1q1bFRsb26T1mplKSkp07rnnOm0HDx5UaWmphg4d2mD8sGHDtGrVqia9BoD2wQ317ZtvvtGxY8fk9/v197//XW+++aYmTJjg9FPfXCTYhyIROurfLmlsMzNbu3atSbLevXvbt99+G/DYioqKBm+b7Nu3z7xer82bN89pq3+OgQMHWlVVldM+ceJE83g8Nnr06IDnyMzMtF69ejnf79+/38LDw+2BBx4IGLd9+3aLiIho0H4q/uu//ssk2XPPPee0bdq0ySTZX//61wbjZ82aZZKsoqKiya8FIDjcVN9uvfVWZ21hYWF23XXX2eHDh51+6pt7cCQQTfbUU0+pb9++39s/efJkRUVFBbR5vV7n69raWpWVlSk6Olr9+vXThx9+2OA5brzxRnXq1Mn5PiMjQy+99JJuuummgHEZGRl64oknVFNTo4iICC1fvlx1dXW6/vrrA/6aT0pKUp8+fbR27Vrdc889p7zWTz75RNnZ2crMzNTkyZOd9mPHjjVYV73OnTs7YxrrB9B+uaG+zZgxQ9ddd52++OILvfLKK6qtrVVVVZXTT31zD0IgmmzYsGGNvk1Q77tX1klSXV2dHn/8cT399NPat2+famtrnb74+PgG4088D0WSYmJiJEmpqakN2uvq6uT3+xUfH689e/bIzNSnT59G53Zi4f0hxcXFGjNmjGJiYvTqq68qPDzc6av/n0Bj5+tUVFQEjAEQOtxQ3/r376/+/ftLOh5IR44cqauuukobNmyQx+OhvrkIIRAtrrHi8OCDD+q+++7TTTfdpD/84Q+Ki4tTWFiYZsyYobq6ugbjTwxcp9JuZpKOF2OPx6M333yz0bHR0dGntAa/36/Ro0errKxM7733XoNzepKTkyVJRUVFDR5bVFSkuLg4/koGOqCOUN++67rrrtOtt96qTz/9VP369aO+uQghEG3i1Vdf1eWXX67nnnsuoL2srCzgpOvmOvvss2VmSk9PP+lbOidTUVGhq666Sp9++qnefvttDRgwoMGYHj166Mwzz9TmzZsb9G3cuFE//vGPT+u1AYSeUKpvjal/+9fv90uivrkJt4hBmwgPD3f+mq23bNkyHTx4sEVfZ+zYsQoPD9f999/f4PXMTIcOHTrp42trazVhwgTl5eVp2bJlyszM/N6x48aN08qVKwNuzbB69Wp9+umnGj9+fPMWAiBkhEp9Ky0tbdBWXV2tv/71r4qKigr4g5f65g4cCUSb+NnPfqZ58+ZpypQpuvjii7V9+3a98MIL6t27d4u+ztlnn61/+7d/U05Ojvbv369rr71WXbt21b59+7RixQpNnTpVd9111/c+/s4779Tf//53XXXVVTp8+LCWLFkS0P/LX/7S+fqee+7RsmXLdPnll2v69Ok6evSoHn74YQ0aNEhTpkxp0XUBaL9Cpb7deuutKi8v14gRI9SjRw8VFxfrhRde0CeffKJHHnkk4O1k6ps7EALRJu655x598803evHFF/Xyyy/rggsu0H//93+3ymdy3n333erbt68effRR3X///ZKOn3A9cuRIXX311Sd97LZt2yRJb7zxRqOfp3liCExNTdW6des0c+ZM3X333YqMjNSYMWP0yCOPcL4M4CKhUt8mTJig5557TgsXLtShQ4fUtWtXDRkyRP/+7//e4LHUN3fw2HePKQMAAKDD45xAAAAAFyIEAgAAuBAhEAAAwIWCFgKfeuopnXXWWercubMyMjK0cePGYE0FAFocNQ5AexeUEPjyyy9r5syZmjt3rj788EOdf/75GjVqVKP3MAKAUEONAxAKgnJ1cEZGhi688EL9+c9/lnT8o3BSU1N1xx13tMol9QDQlqhxAEJBm98nsKqqSlu2bFFOTo7TFhYWpqysLOXl5TX6mMrKyoAPsq6rq9Phw4cVHx8vj8fT6nMG0P6ZmY4cOaKUlBSFhQXvdGdqHICW1lr1rc1D4FdffaXa2lolJiYGtCcmJuqTTz5p9DHz5893booJACdTWFionj17Bu31qXEAWktL17eQ+MSQnJwczZw50/ne7/crLS1NhYWF8vl8QZwZgPaivLxcqamp6tq1a7Cn0mTUOAAn01r1rc1DYPfu3RUeHq6SkpKA9pKSEiUlJTX6GK/X2+jH1Ph8PgokgADBfvuUGgegtbR0fWvzE2ciIyM1ZMgQrV692mmrq6vT6tWrlZmZ2dbTAYAWRY0DECqC8nbwzJkzNXnyZA0dOlTDhg3TY489pm+++UZTpkwJxnQAoEVR4wCEgqCEwAkTJujLL7/UnDlzVFxcrB//+Md66623GpxIDQChiBoHIBQE5T6BzVVeXq6YmBj5/X7OlwEgqWPVhY60FgDN11o1gc8OBgAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBCEcGeAAAAcIHaWum996SiIik5WRo+XAoPD/asXK1JRwLnz5+vCy+8UF27dlVCQoKuvfZa5efnB4ypqKhQdna24uPjFR0drXHjxqmkpCRgTEFBgcaMGaMuXbooISFBs2bNUk1NTfNXAwDNQI0DWsny5dJZZ0mXXy794hfH/z3rrOPtCJomhcB169YpOztb69evV25urqqrqzVy5Eh98803zpjf/e53euONN7Rs2TKtW7dOX3zxhcaOHev019bWasyYMaqqqtIHH3yg559/XosXL9acOXNablUAcBqocUArWL5cuu466fPPA9sPHjzeThAMHmuG0tJSk2Tr1q0zM7OysjLr1KmTLVu2zBmze/duk2R5eXlmZrZq1SoLCwuz4uJiZ8zChQvN5/NZZWXlKb2u3+83Seb3+5szfQAdSGvUBWoc0Ew1NWY9e5pJjW8ej1lq6vFx+F6tVROadWGI3++XJMXFxUmStmzZourqamVlZTlj+vfvr7S0NOXl5UmS8vLyNGjQICUmJjpjRo0apfLycu3cubPR16msrFR5eXnABgCtjRoHNNN77zU8AngiM6mw8Pg4tLnTDoF1dXWaMWOGLrnkEg0cOFCSVFxcrMjISMXGxgaMTUxMVHFxsTPmxOJY31/f15j58+crJibG2VJTU0932gBwSqhxQAsoKmrZcWhRpx0Cs7OztWPHDi1durQl59OonJwc+f1+ZyssLGz11wTgbtQ4oAUkJ7fsOLSo0wqB06ZN08
qVK7V27Vr17NnTaU9KSlJVVZXKysoCxpeUlCgpKckZ890r6eq/rx/zXV6vVz6fL2ADgNZCjQNayPDhUs+eksfTeL/HI6WmHh+HNtekEGhmmjZtmlasWKE1a9YoPT09oH/IkCHq1KmTVq9e7bTl5+eroKBAmZmZkqTMzExt375dpaWlzpjc3Fz5fD4NGDCgOWsBgGahxgEtLDxcevzx419/NwjWf//YY9wvMFiachXJ7bffbjExMfbOO+9YUVGRs3377bfOmNtuu83S0tJszZo1tnnzZsvMzLTMzEynv6amxgYOHGgjR460bdu22VtvvWVnnnmm5eTknPI8uHIOwHe1RF2gxgGt5G9/a3iVcGrq8Xb8oNaqCU0KgZIa3RYtWuSMOXbsmP3mN7+xbt26WZcuXeznP/+5FRUVBTzP/v37bfTo0RYVFWXdu3e3O++806qrq095HhRIAN/VEnWBGge0opoas7VrzV588fi/3BbmlLVWTfCYmbXtscfmKy8vV0xMjPx+P+fOAJDUsepCR1oLgOZrrZrQrPsEAgAAIDQRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ULNC4IIFC+TxeDRjxgynraKiQtnZ2YqPj1d0dLTGjRunkpKSgMcVFBRozJgx6tKlixISEjRr1izV1NQ0ZyoA0KKobwA6utMOgZs2bdKzzz6r8847L6D9d7/7nd544w0tW7ZM69at0xdffKGxY8c6/bW1tRozZoyqqqr0wQcf6Pnnn9fixYs1Z86c018FALQg6hsAV7DTcOTIEevTp4/l5ubaZZddZtOnTzczs7KyMuvUqZMtW7bMGbt7926TZHl5eWZmtmrVKgsLC7Pi4mJnzMKFC83n81llZWWjr1dRUWF+v9/ZCgsLTZL5/f7TmT6ADsjv97dIXWjr+mZGjQNwci1V377rtI4EZmdna8yYMcrKygpo37Jli6qrqwPa+/fvr7S0NOXl5UmS8vLyNGjQICUmJjpjRo0apfLycu3cubPR15s/f75iYmKcLTU19XSmDQA/qK3rm0SNAxAcTQ6BS5cu1Ycffqj58+c36CsuLlZkZKRiY2MD2hMTE1VcXOyMObFA1vfX9zUmJydHfr/f2QoLC5s6bQD4QcGobxI1DkBwRDRlcGFhoaZPn67c3Fx17ty5tebUgNfrldfrbbPXA+A+wapvEjUOQHA06Ujgli1bVFpaqgsuuEARERGKiIjQunXr9MQTTygiIkKJiYmqqqpSWVlZwONKSkqUlJQkSUpKSmpwNV399/VjAKCtUd8AuE2TQuAVV1yh7du3a9u2bc42dOhQTZo0yfm6U6dOWr16tfOY/Px8FRQUKDMzU5KUmZmp7du3q7S01BmTm5srn8+nAQMGtNCyAKBpqG8A3KZJbwd37dpVAwcODGg744wzFB8f77TffPPNmjlzpuLi4uTz+XTHHXcoMzNTF110kSRp5MiRGjBggG644QY99NBDKi4u1r333qvs7GzeDgEQNNQ3AG7TpBB4Kh599FGFhYVp3Lhxqqys1KhRo/T00087/eHh4Vq5cqVuv/12ZWZm6owzztDkyZM1b968lp4KALQo6huAjsRjZhbsSTRVeXm5YmJi5Pf75fP5gj0dAO1AR6oLHWktAJqvtWoCnx0MAADgQoRAAAAAFyIEAgAAuBAhEAAAwIUIgQAAAC5ECAQAAHAhQiAAAIALEQIBAABciBAIAADgQoRAAAAAFyIEAgAAuBAhEAAAwIUIgQAAAC5ECAQAAHAhQiAAAIALEQIBAABciBAIAADgQoRAAAAAFyIEAgAAuBAhEAAAwIUIgQAAAC5ECAQAAHAhQiAAAIALEQIBAABciBAIAADgQoRAAAAAFyIEAgAAuBAhEAAAwIUIgQAAAC5ECAQAAHAhQiAAAIALEQIBAABciBAIAADgQoRAAAAAFyIEAgAAuFBEsCcAtDu1tdJ770lFRVJysjR8uBQeHuxZAQDQopp8JPDgwYP65S9/qfj4eEVFRWnQoEHavHmz029mmjNnjpKTkxUVFaWsrCzt2bMn4DkOHz6sSZMmyefzKTY2VjfffLOOHj3a/NUAzbV8uXTWWdLll0u/+MXxf88663g7OjzqGwA3aVII/Prrr3XJJZeoU6dOevPNN7Vr1y498sgj6tatmzPmoYce0hNPPKFnnnlGGzZs0BlnnKFRo0apoqLCGTNp0iTt3LlTubm5Wrlypd59911NnTq15VYFnI7ly6XrrpM+/zyw/eDB4+0EwQ6N+gbAdawJZs+ebZdeeun39tfV1VlSUpI9/PDDTltZWZl5vV576aWXzMxs165dJsk2bdrkjHnzzTfN4/HYwYMHT2kefr/fJJnf72/K9IHvV1Nj1rOnmdT45vGYpaYeH4d2qbl1ob3UNzNqHIBArVUTmnQk8O9//7uGDh2q8ePHKyEhQYMHD9Z//Md/OP379u1TcXGxsrKynLaYmBhlZGQoLy9PkpSXl6fY2FgNHTrUGZOVlaWwsDBt2LCh0detrKxUeXl5wAa0qPfea3gE8ERmUmHh8XHokIJV3yRqHIDgaFII/J//+R8tXLhQffr00T/+8Q/dfvvt+u1vf6vnn39eklRcXCxJSkxMDHhcYmKi01dcXKyEhISA/oiICMXFxTljvmv+/PmKiYlxttTU1KZMG/hhRUUtOw4hJ1j1TaLGAQiOJoXAuro6XXDBBXrwwQc1ePBgTZ06VbfccoueeeaZ1pqfJCknJ0d+v9/ZCgsLW/X14ELJyS07DiEnWPVNosYBCI4mhcDk5GQNGDAgoO1HP/qRCgoKJElJSUmSpJKSkoAxJSUlTl9SUpJKS0sD+mtqanT48GFnzHd5vV75fL6ADWhRw4dLPXtKHk/j/R6PlJp6fBw6pGDVN4kaByA4mhQCL7nkEuXn5we0ffrpp+rVq5ckKT09XUlJSVq9erXTX15erg0bNigzM1OSlJmZqbKyMm3ZssUZs2bNGtXV1SkjI+O0FwI0S3i49Pjjx7/+bhCs//6xx7hfYAdGfQPgOk25imTjxo0WERFhDzzwgO3Zs8deeOEF69Kliy1ZssQZs2DBAouNjbXXX3/dPv74Y7vmmmssPT3djh075oy58sorbfDgwbZhwwZ7//33rU+fPjZx4sRTngdXzqHV/O1vDa8STk093o52rbl1ob3Ut5ZYC4COpbVqQpNCoJnZG2+8YQMHDjSv12v9+/e3v/zlLwH9dXV1dt9991liYqJ5vV674oorLD8/P2DMoUOHbOLEiRYdHW0+n8+mTJliR44cO
eU5UCDRqmpqzNauNXvxxeP/cluYkNASdaE91LeWWguAjqO1aoLHzCx4xyFPT3l5uWJiYuT3+zl3BoCkjlUXOtJaADRfa9WEJn9sHAAAAEIfIRAAAMCFCIEAAAAuRAgEAABwIUIgAACACxECAQAAXIgQCAAA4EKEQAAAABciBAIAALgQIRAAAMCFCIEAAAAuRAgEAABwIUIgAACACxECAQAAXIgQCAAA4EKEQAAAABciBAIAALgQIRAAAMCFCIEAAAAuRAgEAABwoYhgT+B0mJkkqby8PMgzAdBe1NeD+voQyqhxAE7UWvUtJEPgoUOHJEmpqalBngmA9ubIkSOKiYkJ9jSahRoHoDEtXd9CMgTGxcVJkgoKCkK+2JeXlys1NVWFhYXy+XzBnk6zsJb2yS1rMTMdOXJEKSkpQZpdy6HGtU+spf3pKOuQglPfQjIEhoUdP5UxJiYm5Hd6PZ/Px1raIdbSPn3fWkI9MNWjxrVvrKX96SjrkNq2vnFhCAAAgAsRAgEAAFwoJEOg1+vV3Llz5fV6gz2VZmMt7RNraZ860lpOpiOtk7W0Tx1lLR1lHVJw1uKxjnA/BQAAADRJSB4JBAAAQPMQAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4UEiGwKeeekpnnXWWOnfurIyMDG3cuDHYUwowf/58XXjhheratasSEhJ07bXXKj8/P2DMT37yE3k8noDttttuCxhTUFCgMWPGqEuXLkpISNCsWbNUU1PTlkvR73//+wbz7N+/v9NfUVGh7OxsxcfHKzo6WuPGjVNJSUm7W4cknXXWWQ3W4vF4lJ2dLal975N3331XV111lVJSUuTxePTaa68F9JuZ5syZo+TkZEVFRSkrK0t79uwJGHP48GFNmjRJPp9PsbGxuvnmm3X06NGAMR9//LGGDx+uzp07KzU1VQ899FCbrqW6ulqzZ8/WoEGDdMYZZyglJUU33nijvvjii4DnaGxfLliwoM3X0hqob22nI9U3KXRrHPUtiPXNQszSpUstMjLS/vM//9N27txpt9xyi8XGxlpJSUmwp+YYNWqULVq0yHbs2GHbtm2zn/70p5aWlmZHjx51xlx22WV2yy23WFFRkbP5/X6nv6amxgYOHGhZWVm2detWW7VqlXXv3t1ycnLadC1z5861c889N2CeX375pdN/2223WWpqqq1evdo2b95sF110kV188cXtbh1mZqWlpQHryM3NNUm2du1aM2vf+2TVqlX2r//6r7Z8+XKTZCtWrAjoX7BggcXExNhrr71mH330kV199dWWnp5ux44dc8ZceeWVdv7559v69evtvffes3POOccmTpzo9Pv9fktMTLRJkybZjh077KWXXrKoqCh79tln22wtZWVllpWVZS+//LJ98sknlpeXZ8OGDbMhQ4YEPEevXr1s3rx5AfvqxN+vtlpLS6O+Ud+aI1RrHPUtePUt5ELgsGHDLDs72/m+trbWUlJSbP78+UGc1cmVlpaaJFu3bp3Tdtlll9n06dO/9zGrVq2ysLAwKy4udtoWLlxoPp/PKisrW3O6AebOnWvnn39+o31lZWXWqVMnW7ZsmdO2e/duk2R5eXlm1n7W0Zjp06fb2WefbXV1dWYWOvvku4Wlrq7OkpKS7OGHH3baysrKzOv12ksvvWRmZrt27TJJtmnTJmfMm2++aR6Pxw4ePGhmZk8//bR169YtYC2zZ8+2fv36tdlaGrNx40aTZAcOHHDaevXqZY8++uj3PiYYa2kJ1DfqW0sKxRpHfWvb+hZSbwdXVVVpy5YtysrKctrCwsKUlZWlvLy8IM7s5Px+vyQpLi4uoP2FF15Q9+7dNXDgQOXk5Ojbb791+vLy8jRo0CAlJiY6baNGjVJ5ebl27tzZNhP//+3Zs0cpKSnq3bu3Jk2apIKCAknSli1bVF1dHbA/+vfvr7S0NGd/tKd1nKiqqkpLlizRTTfdJI/H47SHyj450b59+1RcXBywH2JiYpSRkRGwH2JjYzV06FBnTFZWlsLCwrRhwwZnzIgRIxQZGemMGTVqlPLz8/X111+30Woa8vv98ng8io2NDWhfsGCB4uPjNXjwYD388MMBb1m117WcDPWN+taSOkqNo761bn2LaNbs29hXX32l2tragB9QSUpMTNQnn3wSpFmdXF1dnWbMmKFLLrlEAwcOdNp/8YtfqFevXkpJSdHHH3+s2bNnKz8/X8uXL5ckFRcXN7rO+r62kpGRocWLF6tfv34qKirS/fffr+HDh2vHjh0qLi5WZGRkgx/exMREZ47tZR3f9dprr6msrEy/+tWvnLZQ2SffVf/ajc3txP2QkJAQ0B8REaG4uLiAMenp6Q2eo76vW7durTL/k6moqNDs2bM1ceJE+Xw+p/23v/2tLrjgAsXFxemDDz5QTk6OioqK9Kc//cmZb3tbyw+hvlHfWlJHqXHUt9atbyEVAkNRdna2duzYoffffz+gferUqc7XgwYNUnJysq644gp99tlnOvvss9t6mt9r9OjRztfnnXeeMjIy1KtXL73yyiuKiooK4sya57nnntPo0aOVkpLitIXKPnGL6upqXX/99TIzLVy4MKBv5syZztfnnXeeIiMjdeutt2r+/Pkd4jNEQwX1rf2ixrVv7aW+hdTbwd27d1d4eHiDq7NKSkqUlJQUpFl9v2nTpmnlypVau3atevbsedKxGRkZkqS9e/dKkpKSkhpdZ31fsMTGxqpv377au3evkpKSVFVVpbKysoAxJ+6P9riOAwcO6O2339avf/3rk44LlX1S/9on+71ISkpSaWlpQH9NTY0OHz7cLvdVfYE8cOCAcnNzA/5KbkxGRoZqamq0f/9+Se1rLaeK+hb8/dMR6pvUsWoc9a1161tIhcDIyEgNGTJEq1evdtrq6uq0evVqZWZmBnFmgcxM06ZN04oVK7RmzZoGh20bs23bNklScnKyJCkzM1Pbt28P+MGu/2EZMGBAq8z7VBw9elSfffaZkpOTNWTIEHXq1Clgf+Tn56ugoMDZH+1xHYsWLVJCQoLGjBlz0nGhsk/S09OVlJQUsB/Ky8u1YcOGgP1QVlamLVu2OGPWrFmjuro6538EmZmZevfdd1VdXe2Myc3NVb9+/dr0rZL6Arlnzx69/fbbio+P/8HHbNu2TWFhYc5bQu1lLU1BfQv+71JHqG9Sx6px1LdWrm9NvpQkyJYuXWper9cWL15su3btsqlTp1psbGzA1UzBdvvtt1tMTIy98847AZd4f/vtt2ZmtnfvXps3b55t3rzZ9u3bZ6+//rr17t3bRowY4TxH/aX6I0eOtG3bttlbb71lZ555ZpvfeuDOO++0d955x/bt22f//Oc/LSsry7p3726lpaVmdvwWCmlpabZmzRrbvHmzZWZmWmZmZrtbR73a2lpLS0uz2bNnB7S3931y5MgR27p1q23dutUk2Z/+9CfbunWrc0XZggULLDY21l5//XX7+OOP7Zprrmn0FgqDBw+2DRs22Pvvv299+vQJuIVCWVmZJSYm2g033GA7duywpUuXWpcuXVr8FgonW0tV
VZVdffXV1rNnT9u2bVvA70/9lXAffPCBPfroo7Zt2zb77LPPbMmSJXbmmWfajTfe2OZraWnUN+pbc4VijaO+Ba++hVwINDN78sknLS0tzSIjI23YsGG2fv36YE8pgKRGt0WLFpmZWUFBgY0YMcLi4uLM6/XaOeecY7NmzQq4X5OZ2f79+2306NEWFRVl3bt3tzvvvNOqq6vbdC0TJkyw5ORki4yMtB49etiECRNs7969Tv+xY8fsN7/5jXXr1s26dOliP//5z62oqKjdraPeP/7xD5Nk+fn5Ae3tfZ+sXbu20Z+pyZMnm9nx2yjcd999lpiYaF6v16644ooGazx06JBNnDjRoqOjzefz2ZQpU+zIkSMBYz766CO79NJLzev1Wo8ePWzBggVtupZ9+/Z97+9P/b3OtmzZYhkZGRYTE2OdO3e2H/3oR/bggw9aRUVFm6+lNVDf2k5Hq29moVnjqG/Bq28eM7OmHTsEAABAqAupcwIBAADQMgiBAAAALkQIBAAAcCFCIAAAgAsRAgEAAFyIEAgAAOBChEAAAAAXIgQCAAC4ECEQAADAhQiBAAAALkQIBAAAcKH/D4wWoYb9OawnAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "# Create a list of frame numbers corresponding to each point\n", + "frame_numbers = queries[:,0].int().tolist()\n", + "\n", + "fig, axs = plt.subplots(2, 2)\n", + "axs = axs.flatten()\n", + "\n", + "for i, (query, frame_number) in enumerate(zip(queries, frame_numbers)):\n", + " ax = axs[i]\n", + " ax.plot(query[1].item(), query[2].item(), 'ro') \n", + " \n", + " ax.set_title(\"Frame {}\".format(frame_number))\n", + " ax.set_xlim(0, video.shape[4])\n", + " ax.set_ylim(0, video.shape[3])\n", + " ax.invert_yaxis()\n", + " \n", + "plt.tight_layout()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "aec7693b-9d74-48b3-b612-360290ff1e7a", + "metadata": {}, + "source": [ + "We pass these points as input to the model and track them:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "09008ca9-6a87-494f-8b05-6370cae6a600", + "metadata": {}, + "outputs": [], + "source": [ + "pred_tracks, __ = model(video, queries=queries[None])" + ] + }, + { + "cell_type": "markdown", + "id": "b00d2a35-3daf-482d-b40b-b6d4f548ca40", + "metadata": {}, + "source": [ + "Finally, we visualize the results with tracks leaving traces from the frame where the tracking starts.\n", + "Color encodes time:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "01467f8d-667c-4f41-b418-93132584c659", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Video saved to ./videos/queries_pred_track.mp4\n" + ] + } + ], + "source": [ + "vis = Visualizer(\n", + " save_dir='./videos',\n", + " linewidth=6,\n", + " mode='cool',\n", + " tracks_leave_trace=-1\n", + ")\n", + "vis.visualize(\n", + " video=video,\n", + " tracks=pred_tracks, \n", + " filename='queries');" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "fe23d210-ed90-49f1-8311-b7e354c7a9f6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "HTML(\"\"\"\"\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "87f2a3b4-a8b3-4aeb-87d2-28f056c624ba", + "metadata": {}, + "source": [ + "## Points on a regular grid" + ] + }, + { + "cell_type": "markdown", + "id": "a9aac679-19f8-4b78-9cc9-d934c6f83b01", + "metadata": {}, + "source": [ + "### Tracking forward from the frame number x" + ] + }, + { + "cell_type": "markdown", + "id": "0aeabca9-cc34-4d0f-8b2d-e6a6f797cb20", + "metadata": {}, + "source": [ + "Let's now sample points on a regular grid and start tracking from the frame number 20 with a grid of 30\\*30. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "c880f3ca-cf42-4f64-9df6-a0e8de6561dc", + "metadata": {}, + "outputs": [], + "source": [ + "grid_size = 30\n", + "grid_query_frame = 20" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "3cd58820-7b23-469e-9b6d-5fa81257981f", + "metadata": {}, + "outputs": [], + "source": [ + "pred_tracks, __ = model(video, grid_size=grid_size, grid_query_frame=grid_query_frame)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "25a85a1d-dce0-4e6b-9f7a-aaf31ade0600", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Video saved to ./videos/grid_query_20_pred_track.mp4\n" + ] + } + ], + "source": [ + "vis = Visualizer(save_dir='./videos', pad_value=100)\n", + "vis.visualize(\n", + " video=video,\n", + " tracks=pred_tracks, \n", + " filename='grid_query_20',\n", + " query_frame=grid_query_frame);" + ] + }, + { + "cell_type": "markdown", + "id": "ce0fb5b8-d249-4f4e-b59a-51b4f03972c4", + "metadata": {}, + "source": [ + "Notice that tracking starts only from points sampled on a frame in the middle of the video. This is different from the grid in the first example:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "f0b01d51-9222-472b-a714-188c38d83ad9", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "HTML(\"\"\"\"\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "10baad8f-0cb8-4118-9e69-3fb24575715c", + "metadata": {}, + "source": [ + "### Tracking forward **and backward** from the frame number x" + ] + }, + { + "cell_type": "markdown", + "id": "8409e2f7-9e4e-4228-b198-56a64e2260a7", + "metadata": {}, + "source": [ + "CoTracker is an online algorithm and tracks points only in one direction. 
However, we can also run it backward from the queried point to track in both directions: " + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "506233dc-1fb3-4a3c-b9eb-5cbd5df49128", + "metadata": {}, + "outputs": [], + "source": [ + "grid_size = 30\n", + "grid_query_frame = 20" + ] + }, + { + "cell_type": "markdown", + "id": "495b5fb4-9050-41fe-be98-d757916d0812", + "metadata": {}, + "source": [ + "Let's activate backward tracking:" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "677cf34e-6c6a-49e3-a21b-f8a4f718f916", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Video saved to ./videos/grid_query_20_backward_pred_track.mp4\n" + ] + } + ], + "source": [ + "pred_tracks, __ = model(video, grid_size=grid_size, grid_query_frame=grid_query_frame, backward_tracking=True)\n", + "vis.visualize(\n", + " video=video,\n", + " tracks=pred_tracks, \n", + " filename='grid_query_20_backward',\n", + " query_frame=grid_query_frame);" + ] + }, + { + "cell_type": "markdown", + "id": "585a0afa-2cfc-4a07-a6f0-f65924b9ebce", + "metadata": {}, + "source": [ + "As you can see, we are now tracking points queried in the middle from the first frame:" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "c8d64ab0-7e92-4238-8e7d-178652fc409c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "HTML(\"\"\"\"\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "fb55fb01-6d8e-4e06-9346-8b2e9ef489c2", + "metadata": {}, + "source": [ + "## Regular grid + Segmentation mask" + ] + }, + { + "cell_type": "markdown", + "id": "e93a6b0a-b173-46fa-a6d2-1661ae6e6779", + "metadata": {}, + "source": [ + "Let's now sample points on a grid and filter them with a segmentation mask.\n", + "This allows us to track points sampled densely on an object because we consume less GPU memory." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "b759548d-1eda-473e-9c90-99e5d3197e20", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "from PIL import Image\n", + "grid_size = 120" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "14ae8a8b-fec7-40d1-b6f2-10e333b75db4", + "metadata": {}, + "outputs": [], + "source": [ + "input_mask = '../assets/apple_mask.png'\n", + "segm_mask = np.array(Image.open(input_mask))" + ] + }, + { + "cell_type": "markdown", + "id": "4e3a1520-64bf-4a0d-b6e9-639430e31940", + "metadata": {}, + "source": [ + "That's a segmentation mask for the first frame:" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "4d2efd4e-22df-4833-b9a0-a0763d59ee22", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAigAAAFHCAYAAACLR7eXAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAACRIUlEQVR4nOz9e6xs2Vnejf7GGPNSt1Vr7bWvfXf72m6w4YsN9j45JycCBweZKAgjkcgCB1mJhNpWwBIijoDIJMEIpJCgAI6iiERKHCL+IFGMHGI5YBTcMY6Rv+PYHw630G137/tadZ2XcXnPH2PMWmu3G0zbhl5tj59V3ntVzaqaVbu757Pf93mfV4mIkMlkMplMJnOG0M/3CWQymUwmk8k8kyxQMplMJpPJnDmyQMlkMplMJnPmyAIlk8lkMpnMmSMLlEwmk8lkMmeOLFAymUwmk8mcObJAyWQymUwmc+bIAiWTyWQymcyZIwuUTCaTyWQyZ44sUDKZTCaTyZw5nleB8rM/+7O86EUvYjQa8brXvY7f+q3fej5PJ5PJZDKZzBnheRMo/+E//Afe+c538g/+wT/gt3/7t/m6r/s63vjGN3Ljxo3n65QymUwmk8mcEdTztSzwda97Hd/wDd/AP//n/xyAEAIPPPAA73jHO/h7f+/vPR+nlMlkMplM5oxQPB9v2vc9H//4x3nXu961u09rzRve8AYef/zxzzu+6zq6rtv9HELgzp07nD9/HqXUn8s5ZzKZTCaT+dIQEVarFffeey9a/8lNnOdFoNy6dQvvPZcvX77r/suXL/M7v/M7n3f8e97zHt797nf/eZ1eJpPJZDKZP0OefPJJ7r///j/xmBfEFM+73vUuFovF7vbEE08836eUyWQymUzmi2Rvb+8LHvO8VFAuXLiAMYbr16/fdf/169e5cuXK5x1f1zV1Xf95nV4mk8lkMpk/Q/409oznpYJSVRWvec1r+NCHPrS7L4TAhz70Ia5evfp8nFImk8lkMpkzxPNSQQF45zvfyVvf+lZe+9rX8o3f+I3803/6T9lsNnzv937v83VKmUwmk8lkzgjPm0D5ru/6Lm7evMmP/uiPcu3aNb7+67+e//Jf/svnGWczmUwmk8l89fG85aB8KSyXS/b395/v08hkMplMJvNFsFgsmM/nf+IxL4gpnkwmk8lkMl9dZIGSyWQymUzmzJEFSiaTyWQymTNHFiiZTCaTyWTOHFmgZDKZTCaTOXNkgZLJZDKZTObMkQVKJpPJZDKZM0cWKJlMJpPJZM4cWaBkMplMJpM5c2SBkslkMplM5syRBUomk8lkMpkzRxYomUwmk8lkzhxZoGQymUwmkzlzZIGSyWQymUzmzJEFSiaTyWQymTNHFiiZTCaTyWTOHFmgZDKZTCaTOXNkgZLJZDKZTObMkQVKJpPJZDKZM0cWKJlMJpPJZM4cWaBkMplMJpM5c2SBkslkMplM5syRBUomk8lkMpkzRxYomUwmk8lkzhxZoGQymUwmkzlzZIGSyWQymUzmzJEFSiaTyWQymTNHFiiZTCaTyWTOHM9ZoPzGb/wGf+2v/TXuvfdelFL8x//4H+96XET40R/9Ue655x7G4zFveMMb+N3f/d27jrlz5w5vectbmM/nHBwc8La3vY31ev0lfZBMJpPJZDJfOTxngbLZbPi6r/s6fvZnf/ZZH//Jn/xJfuZnfob3vve9fPSjH2U6nfLGN76Rtm13x7zlLW/hU5/6FB/84Ad5//vfz2/8xm/wd/7O3/niP0Umk8lkMpmvLORLAJBf/uVf3v0cQpArV67IT/3UT+3uOz4+lrqu5d//+38vIiKf/vSnBZCPfexju2M+8IEPiFJKPve5z/2p3nexWAiQb/mWb/mWb/mWby/A22Kx+ILX+i+rB+UP//APuXbtGm94wxt29+3v7/O6172Oxx9/HIDHH3+cg4MDXvva1+6OecMb3oDWmo9+9KPP+rpd17FcLu+6ZTKZTCaT+crlyypQrl27BsDly5fvuv/y5cu7x65du8alS5fuerwoCg4PD3fHPJP3vOc97O/v724PPPDAl/O0M5lMJpPJnDFeEFM873rXu1gsFrvbk08++XyfUiaTyWQymT9DvqwC5cqVKwBcv379rvuvX7++e+zKlSvcuHHjrsedc9y5c2d3zDOp65r5fH7XLZPJZDKZzFcuX1aB8vDDD3PlyhU+9KEP7e5bLpd89KMf5erVqwBcvXqV4+NjPv7xj++O+W//7b8RQuB1r3vdl/N0MplMJpPJvEApnusT1us1v/d7v7f7+Q//8A/5xCc+weHhIQ8++CDf//3fzz/6R/+Il73sZTz88MP8yI/8CPfeey/f/u3fDsArX/lK/upf/av87b/9t3nve9+LtZa3v/3t/I2/8Te49957v2wfLJPJZDKZzAuYP+VE8Y5f+7Vfe9aRobe+9a0iEkeNf+RHfkQuX74sdV3LN3/zN8tnPvOZu17j9u3b8jf/5t+U2Wwm8/lcvvd7v1dWq9Wf+hzymHG+5Vu+5Vu+5dsL9/anGTNWIiK8wFgul+zv7z/fp5HJZDKZTOaLYLFYfEE/6QtiiieTyWQymcxX
F1mgZDKZTCaTOXM8Z5NsJvPViFIACqXiTSuFNhqjDShABFMUlEVBVZbUdUVhChSglaYoDIUx6PT88Wi0+1lEEBEUihA81jmarqO3DusdPngkCJ21WOfw3uOcI6Tn+RAIPux+RoTYtxVeeA3cTCaTiWSBksn8CSiliNokihKlNcboJEQqqrrCGI3Rmsl4zGwyYT7bY282Y28yYTaaMB2NmIzHTEdj6iKKllFZMa6SiBEghCRSYNu23Do+5mi15GizpOlbmr5jud2w2m7ZNC3btqWzFuc9Xd9j+/h77wMhvZYkgXK3aMlkMpkXBlmgZDLPgoolk7srJlpjCkNVloxHI6aTSbyNR0zHYw7ncy7tn+PSuQucnx+wN5qwNxqzN5kyHY2ZjcYYUfRti7eWQmlKbdAC4jzBeZRShHlgu9+xtR1r27Potxw3C442S24vF9xcLri9XrFotmzalvW2oWlbuq7HOodzbidUgggSAgFQWaRkMpkXEFmgZDKnGATJTpjoKEwKYyiKglFdM5tOOHdwwKXzF7h87gKXDg64tH/AvQeHXNw7YK8cMzIlJRotQl1WTMZjlBPctmW7svjeopWiNAWVKcAGnPUoEYrCcFGPkXpKmCo6JaylYys9q3bLjcUR15dHXNsuudVsuLlYcLxcslyvWW8b2raj63u6vsc5h3MeHzwhxEpKrqZkMpkXAlmgZDKJoYWjtcJojTbRN1JWJeO6ZjqZcDDf456Ll7j/4hUevvdBXnLpfi5P5+yVNRMx1F5RBQVdIDgX/SbUGK/w1uGbQH+rQXeWqqqYTmuqomC53EBvCUFQk5rJfIoEwbWeutBMqgnBTHHlnBeNL7C92LOi44bdcm295NZiwfU7t7mxOGaxXbNarzlaLlltNmy3TRIrJ0JFJJy0fzKZTOYMkgVK5queoVpiUgvnmdWSvdmUg9keF8+d46FLV3jkgRfx4sv3c3HvAgfVlKIPaOsxnaP0oDpH6C3OOkBo7mwJ3iPOowO44zXbzYayLOnrNSLCcrnE+wAKdGmYHszRlWEyGXNwMKddNPjgqbRhVhQcVlP8+IAH5rA+51lebFk2W47aNUftipurOzxx4zqfu32TG0d3OF4uadqOtovVlcFsu/OrZLGSyWTOGFmgZL6qGdo4xkRvSVVVjOqayXjE+YMD7rl0ifsvXuKhK/fwwPlL3HNwngvTffZGU2bVlJGUeN8S+o5CF+i2Z3Nngd22uL7H9j2bdkPTtzjrmE5mKOBzTz2FTj4X7wNd34ECUQKFZnR7jClL7r//AZQyrBZHlCgm9Yi6rinrisoJRVVQ6YI9mXJlMqUfn8dqz8Y23Lq84NrmiD+6fYMnbl3j1nrBneWSO4sFi/WabdPS31VZORErmUwm83yTBUrmq5LTPpPCGOq6YjaZMJtO2ZvNuHjuHA8/+AAvuf9BXnL/g9x38TJTU7FXjBgVNVVRMipqigBiNGo+BYFw7Q7dZ7esbt+m3zZYZ1lsFtxeH9H1lrIsCUFomoa6qijKEus91jucs3hxmLKgamrKoqLZbtibH1BVFXvTKdqUWLfBL5agwIxK1GhEOZpwMJ0RKoMVj9cV9+3NeNn0Eo/u38dT9xxzfXvMZ49u8sTN6zx1dJtbi2OOjhestxvarsdad6oFlFVKJpN5fskCJfNVxzOnckZVzXxvxvnDQy6cO8fFw3M8/MCDvPqVj/CSB17Epf3zTEcjjDJUpqIwFVoDXQtdjyoLlNZxTLirqKeG42tbbL9msTjm1uqI43ZN6yweYo6J9+zNZhhfsNluaW1PCB5BYhWnHDMuJ/TO0tmG2XSOBGEy22O72dCsV4zKkvneHmOBsihREpA+UAaolcGLplSGelJzMNnnof0rvHh+hScP7uGp9RFPHd3k9556gut3bnNnuWS92dL2Pc46fKqmZDKZzPNFFiiZryqUOpnKKcuCuq6Zz2bcc/kSD917Hy99+GFe8ZKX8LIXv5grFy8yq8fUKgaw6bJEjI5TMM4RgsfbjrIIKBTSdrTr2wRaXGjo+zXrzR2absW237Kxls57euuQEFi1W0II9N7F7VlKoRSYrqUuOiZ1y8RN6Z2l7VqOl0dcu/E5yqLG6JL53ozRdMJeVVFIYHH9OmjDaDShNAUaqIzCGMVEleyrgsPJiHvKQ47mK66fO+bBcxf4g9uf4/9ce5rP3rjBncWKTdPQ9zZ6VCRXUzKZzPNDFiiZrxqUUilkrYw+k8mY+d4e91++zCtf/FJe9fJHeOXLX869l64wn+0xqkeUxqCVoAoFpRCwiHMosQgt4hu8dQBoZ+nXt1mtb7PeHrFY3mbbrdi0S9a2ZdNbei+7nJKTWV+N0gatDQK4IAwPewm0XUNhSkpTUI9GTEZTZuM9RGmUKWibLTevLSnKioNz5/FdDzpmqlAYTF1hqoqx0kyNMDdjDsuaS9WUe6ZzXnT+Ir93/iKfOXiCP7x2jadv3+F4uaLtutT2Cbu020wmk/nzIguUzFc8SoFSMe21qkqmkwnn9ve5cP48Vw7P8+qXP8LrXvkqXnbPgxzs71PXEwqpML1GaVCVgaqCKmBsQ3A90rXodgvdBrfdYr1FbVrazR2Oj66xWt1m2Ryz6bc0vsMGS2d7OutjNH0ISAgIKp6bKjC6QCkDSuHxtLbHBZ+yUiyVqUBH30xdjXDWcv36dSqjqcuCw8PzzGZ7tG1P13Voral09MuU9QhBY7yjUI5SVVSimGjDvqm5XB/wwPwiv3v4WT79uSf5g2tPc/v4mPV2S9t1yUib2z6ZTObPjyxQMl/RnLR0NFVZMZ1OuHB4yMMPPMgrH34xL7vvQV718Mu5b++QuRlRWEVhBOM9ITiceKgURo0xqkR5QTlP6DqKtkXbDtuuadYL1k9fp11tWW5us2jusLZrtrbHepd2+QSc63HeI4G04CdO8gQVKxUKhdYGKJEQ8EYTKkErRaFlF2HfdFs22y3zK1fYm9YEZ1HGsFmv2W62aGMoywpCQAUhOEcAnLOId2ilmKXslpkrOChGXKwnXKrmnB8fcDib8/s3nuLpmzc5Wi7ZNC3WWpzP1ZRMJvPnQxYoma9YhpZOYQrqumI6HnPx8JAXP/Qi/q9Hvpave/gVPHjuMheqOdO+xHiN8lGcCJa2bRAV6GwDT3oODqfoEQRxNMtj3GaNaxu6dsN2cYvF0Q2Wtxcst0ds/ZpeOhwejwcRFAHBE4JF0OjhXz+J7ZwgMQdFfFwAWBSaSldAABVASWzbKA3GYKoSlKZzjvneDFMWrO4sabdNnBYqLT4lyU72ppiyQJyl7zu8dVRV3COkvWNE4HwxohwdcnBYcqmacGl/zmdmE/7w6WvcODpivUmBbz5N+oSQE2kzmcyfGVmgZL4iOamcFIxGFbPJlAvnDnjp/Q/x2le8mte+7Ou5f3qReaipNwpDQJcBbTz9qsG5js1mzXxvgtssOLr+OfqRpp4YTKGwtmXrGpzrsb6jaZYst0tu3rlJ2zV0OHo8vQo4HWLlAQ8EYtD8My/tQlQnEFQACYRggIBSEttAxqTFf6C0QYywajZMKJk
w4Wi1BuswGtpmg+07KtvhupZ2u0IbhVY6jidrRde1u3d3weMVlBK4oArG00ucG085nEyY1TX/uyh4Wh+x2rV8HA4gJdJmMpnMl5ssUDJfcQxjxEO+yd5sxuUL53n5Qw9x9eVfz2se+hrumVxi6ioqr2LLxjZs+w3e9xit8MGyPLpDfbAPdku13HD82Tv40FKUGlUpOmOx2hLEY6Vn3W7pvKXFYg10Cjrl6fE4CXiJIkQrTRDYiRI1nPgp6aKilAlAiB+Koiyp6pqqGjEajQgErLfM5+fZ25vTtpbFrSO6ZosCyrLEOUtVWSpfUJaGoGJirKpqFFCPRpRVibWWzXaL73sUcGA0s9GMvXMPMzMVI11RFhVP3bnNarOhaVqwFu8gkEVKJpP58pMFSuYrip04KQpGoxGHB3MeuOcKL3/wQb7xkVfzF+55BZeLA+o2MFYebQP9akOzOqJZHeP6lqoylJWmu3Obo82S4Dqc6+iWK5rtgk23RmqNm4AeK6QEKaF1HX3h6U3AGeiNYLXHK48LcWRXoSkKQ2sdQStMYTh/4QJXrtzDE0/+Eb23LJdLtCiUBkegRLDBE7Simk44mB+idPxXtxpVdNby+3/wf2i3DdJ2FAqMMbjg0FbT2Y7SluzP51hrWa1WGGMoy5LJdMJkOsEYA0ChNOIDJgQq8dw/HlHP7qW+T1HVFaNJzbVbd9KenxMDbcgG2kwm82UmC5TMVwy7tk5hGNcjDs8d8OL77uPrXvZyvvFlj/Lycw9w3k+YWqFIo8J227E9OmZ9dIfl4g5d3wAeUwhd13LTd/Rui7MdVWXQReCoWdNuOvRGUU9r9LzAjcCJp1FRoFgjOCN4DV4JVhxBCaKhR9PXBdOL53jkVY/yspe/lKPjI8qHDrh46QJP/v4f8nv/9/+DbF1sB2nFdDajGo8YT6YcHJ7n9u3baG04Oj7m9q1bVKakNJpCCS4EnHe0ycwqIiituXXndvw5CKYoKMuCVbPFLOLeoVFdUZqCojQopUErwrZl5DperueMzj/E4WjK/55c4//cvM61W3dYrD5/HDmTyWS+HGSBkvmK4PPEycE+L7n/AV7zyKO87pGv5WvOP8hspRk7w8QUGA2u7VmvN2yOl3TNlq5r6boWpQJu2xLE4X30mIh4+rbH0rJxPT0eOsda9+h6DEphxdOJR5TgNNh0c0rhtSYUEBT0WnHPwy/mr37HX8Ma+PBv/gbLxQJtNNfcivFEM7nnHO31Baob9vV4ppMps8ket27d4vj4mOl0SvCBK5cuMa5H3Ll5E28tKgREAgASBJ/8Lial3Qbvd9uay7rGlIbReMxemFEUBVqB0gq0wjYd3WaNqRT3z8ZMDu9nf7LHdDyhMAVaKxbLFQ0dvXOEFO6WyWQyXypZoGRe8ChFausYRnWMrX/gyj285uWv5C+++Gt4uLrAeOEwDYiDTlqUgOt7NsfHrJd3sP0W5zqCWBQBFzq8tzjf0bsWpYW2b2hCRyghFCr6SoJDS4n2mj44rA8E57Cux4snoPABPJoAeAXFdMSVhx7AjMc8/vHf4qOf+ATrzYogglEKbT0XJ3tcqubUIoiC/XMHlFXJk597gr7tMcqgRDCF4fqNG0gQRkqzP52A92y3W5xzYDRBxdZS33cASAjURQUqoLXCOct6bWnaJrV9xrR9j9JxqmlUFPiugxAYlcI9o5r+/OWYgJsqMigFbYcFyCIlk8l8GcgCJfOCZ5hwqdNCvSvnL/DoQy/mNS96hBeNLzBbBQrfQRdw1mOHCPcQ8M2WvlnR2S3OW7y3BHFst0s628WxYBzW97S2YeMapDSUe2O8BhzQ9XgKbHDY3uGto3Muvk8AERAZxoMV07197ixW/NcP/Teeun2TgEJXFbbr6HtLGWCx3DAaGc6rCUVVMd3bY7VesdqsICgqU2L7Hu81zgWunD/Pw/fei+06nvrs5xAJ1HWN12AljgX3vcPZKCoCQq1ryuDoXU/TNti0C2gynYJS7M3nTCZTWoFiNMVtG4qVZdoK50aKF527iLOOvu/TZBJAzGkRn9s9mUzmSyMLlMwLGq0URuv4N//RmMvnL/Cql76c//fLvpaXjC8y2Sj0psdojUHR9Q3b5RpEKMsK77Ysl0csVscoozClwdqG1XpB17doA0F7OtvG8V8N1jls02AmJYgmdB5RAYcQnMf6QBsCIcgu4+Tk/4U7R3f4gxvX6LTQI/RiccERfEAJFKIYm4JSaUTF5z311FOE3lOXNcYYjClAx/bPpKqojWa9XrE8XiAI8/k+ZVXS2p7rR7dpmoa+66IfRSm27RZjDOFOwPn4/spoRCn0aoEpC+rVgnMHB1jnKMqSixcuMp4eELYbJosj9sfw4sOLKTwuTgeFENL+HpeNs5lM5ksiC5TMC5ad76QsmIxHXDw85JGHXsT/65Wv4hX7V5g3inLrkKaD8QinhPVqyc2nnqJpt1hrsa5nuVzSdFs8sYIg4rCuw9oeUR6vHA6PLjXaKFSh6MXSdT1al5TUIIKXgJVAJwGbouwV4PHJICsICiFQVybWZsSjHJiUbaJ1wQTD4WhO2cfANmt7WhqMGKQQlDGUVU1V1agg4D3L5ZLVYkGwjsIYFv2Ctm3YdNEzEyQtNBTBe4+XgNGaIB4f4uJDUQpVGLRW0CvW2y1Hx0cURUFRVay3G+YHB2hj0L0wsR1hYriyN6NxF6I4GQRKEGwsHeUwt0wm80WRBUrmBckgTkxRMK5HXDh3jpc98ADf+PKv4dGDe5g1UKwttD1uu2SxWbHYrlke32G7PqbrWpq2Y73dslqt6H2PEBAlaAUBTwgepQIeRyhiwqvuwSCIAWUUXhzeSswXUYKXeBOlCAp88FgckvJHHApRiqJU7BcjyuAxDXQ2Tt4oFJPRmAIVtyUH6HtLrT1lUaK1iQsDJzNmoylFMr067xDvMaYkCGw2G/quo5eAiMYjBB+rGj54vAgqxFFmpRUBhfUO31mUVqiUz2K0ift7nMN1PavVimo6YTKdUmEojrfMjOXe6Qx9JVpRQpBd9cQRPS+ZTCbzXMkCJfOC4yQl1lBXJfO9GS+69z5e+7JHedX5+zjXwHgTCKuevlliXUvnA9t2S+97nBKC0VglrPuWjevobRfFSaEpCo0oYqXClGhlsOKwEvACIWhiakg0mVrvsX0AreIiQAnpoi9YCVii0RUCQRTiQRtNYYSZLlCmQKRHHITgQDk6GrQTkAJrLFJDXddMp1PG9Zi6KJlWMYROBG7dugVGKLVBAyhNNRrjJNCJp3U9m+0S16So+jTlE8NrPT74eD+C1rGKorSOrRt/8t2HPlaKhEA9GVEqzVQMShTzw0uMqxoRCHIiUhDJptlMJvOc0c/l4Pe85z18wzd8A3t7e1y6dIlv//Zv5zOf+cxdx7Rty2OPPcb58+eZzWa8+c1v5vr163cd88QTT/CmN72JyWTCpUuX+MEf/ME4cZDJfAGiOIm+k6IoGI9GXDg4xyvufZCvufwQ53zBhXJGacFtN3TbNcF6Dvbm7E/3UEpTjafU0xmiNUVdMdufc3jpIucuXa
CejAnGELTCK3AavNL4VPlQSiFaEYLCB4Uog2AIHpwLOAc+GWODgEWwCpwCpwSHxwUbhYhYNJ4KRak0JvlP1LDtWBQh3QpTMRpNGY+njKsR916+wkMPPcTBwQEKoTSGSmu0BMRZ6sJw/twB916+zOXzh+ztTSlKg+iAE4vHYYOl6Rs2zZZt29DZHuscvbP0zqVqUMB6T+csrevpnMUFz3q54ujmbfq2xSCUXhi1jnsmMx68fIlze3uM65qiMGit4w6hTCaTeQ48pwrKhz/8YR577DG+4Ru+Aeccf//v/32+5Vu+hU9/+tNMp1MAfuAHfoBf+ZVf4Zd+6ZfY39/n7W9/O9/xHd/Bb/7mbwIxz+FNb3oTV65c4SMf+QhPP/003/M930NZlvz4j//4l/8TZr6iiEmxaWqnrtjfm/PQ5Xt5xeH9HNiSOoBtW9bLFV27xjoLSmOPj9m0W7ptgxiDdTZuFYY4TqsVRVWijMZvN7RdHBOW3iPiQQm61BS6oEQnLwfQC1IqlNJIirMPXvC9o3cemzbwKGKLx6fY+1IphNieEQlpVFozXMdjwJpCKUNRVIxGE0b1hMl4yovueZD5ZMpisSA4T3CeQmlc8Ni+xXYdLjiOlkcI0PQty25DY7f0rsen9xOEkM4nLgAMoBQKUEZTKYkbnAkoD8o5TPCgNVop8AqjhbIYMatHLJuGcSi5b2+f5eGGputOFgum1lcmk8n8aVHyJdjsb968yaVLl/jwhz/MX/pLf4nFYsHFixd53/vex3d+53cC8Du/8zu88pWv5PHHH+f1r389H/jAB/i2b/s2nnrqKS5fvgzAe9/7Xn7oh36ImzdvUlXVF3zf5XLJ/v7+F3vamRcoSp1UTkZ1zf7eHg/fdz9/6RVfz+suv4SHx4dcUBWq8xzduIltlrTW0vU9TbNhs12z2jb0QbDO4SVWC4RYKQjK03YtTd9GP4aPRlnnLSF4TKkxtcEUhqLS6NoQSlAjhS8FC7gg2L7DtZa2c9iQhIYFH0ISKJ56VFHWI3wHXWdpOkvf96ggjE3JKGgKKRgVYy7snee+S/dzsDdnPtnjwvSQ+XzGvffeg7OOO7ducfvGTVbrFX3X0HRNqoLY2GqRwKZbYcVG/8kw+6wUXhS9tScCJf3XQBSgoChLdGFQxOqRKQxVWe3abNWoRhvN/qUDnBI6o7ltG26qwO9cf5rPXr/BYr2mOyVWMplMZrFYMJ/P/8RjnlOL59neAODw8BCAj3/841hrecMb3rA75pFHHuHBBx/k8ccfB+Dxxx/nVa961U6cALzxjW9kuVzyqU996ks5ncxXOEpFX0RRRO/Jub09XnbhHh45uMwD430OdEURFL63BD9cnAXnHai4mK+qKqqqYDqbMBqNqOqKsixPNgUDCBSmwJgSpXVc2qeiiGm7ltVmyXq7Ybvd0rctfeNwvRA8dK2l7x299fQh4IfqgRKCTl4UpfBe8H3Ae49zPk4UhRCPG9pB4tFGURQlptAorSHAeDLhnvvuZzSbglZsNhvarsU5x/zggIdf+mIu33MPDzz0IPc9+ACXrlxmMpsx3ZsxmU0xZYEYhS4LxtMR49mYelxhSrO76UIjWuhdj3MW512c/nGe3vZ4H/NkrPf4IBxdv4Nft+i2Y+zhvCl54MIFzh/sMx2PKMsyt3oymcxz4os2yYYQ+P7v/37+4l/8i3zt134tANeuXaOqKg4ODu469vLly1y7dm13zGlxMjw+PPZsdF1H13W7n5fL5Rd72pkXKCr5PwoTKyh1XXNx/4CXnb+Hh6aHzEOB6XuaTUPXtHFsltg2mU/36H2NtRZdFYy0pqhKjhYLRMcWjw+eO4uOIIHCGNAaVLxA68IgIU72uOBAx/Hf0DuwCjqL7xRqXOIlihIrggtpxFZCCmsbjKPgeo94C6Lihd8HBEEVBRQGsQGtNejYWlKFQnQUObeXR3S/bxnVNa5r8c5RVTXBB5y13L65xQVPZ3s2zRal43hxUZWMZxOoilhtcY6+9zgXp3sCHjX8nUUJKElj0g4VNIKJvSoHac4HEaEa1RCEZttRS8l4VOB6zzlVcOXgHF0fBU387GmkORdSMpnMF+CLFiiPPfYY/+t//S/++3//71/O83lW3vOe9/Dud7/7z/x9MmeX0+2dcV2zP53y4MXLvOTwMueLCaZxSOeQ3qUQtpLgLZD2w7hAPZogtqO1Pcv1mt5ZrLOEtFwveE9h4r8SgXjx1cnzElQMXtNGYwoNSu0SWZWLe2ucikkqLvhoMg3RKRs9GJDSzNj91kVhYq0D4nsZY9DGQIAQwEmgriqKosCJZdksabZbts2ag+k+Vy5cZFSWHN25Q9NsWSwWbLYrgiJuGg4ebTTBBELnCQSMMShjCNbSdS3exzEdrUz6sk9+iQJLCOKi4AsaTwDv0AhN21DUFfO9PbpFx2bVUPqSelyyp+DeyQx/MY5BW+eiEJMc4JbJZL4wX1SL5+1vfzvvf//7+bVf+zXuv//+3f1Xrlyh73uOj4/vOv769etcuXJld8wzp3qGn4djnsm73vUuFovF7vbkk09+MaedeYEyVE90SoydTiZcPDjkxRfv5b7ZIXtmROnjP8zKaKwE2r7DBo8uCkRHkYEonPM0bUPvOiAQgqfvO3xwlEVBXZQYpfHOIt5TGINWJmZ7CIhoou6IeSKSxopjoFq8CDvnCT5G6Qcv+BDbQ15CzCMhtnGCiov8fNq3g46fUdIEkajYklJa0Wy3bJYrjo/usFgesW3WdHbL8fIOy82So8URy/UC7x2jUU1VFpSlwRhNCI6ubVgsjjk6vsN2uwGG1Nc4MuyDTyLEIeksYadVoqBSPoXZhd3nD96xXi+5fedWFGbe0W57VOuYoZmJ4oHzFzm3N2c0TPWo3OrJZDJfmOdUQRER3vGOd/DLv/zL/Pqv/zoPP/zwXY+/5jWvoSxLPvShD/HmN78ZgM985jM88cQTXL16FYCrV6/yj//xP+bGjRtcunQJgA9+8IPM53MeffTRZ33fuq6p6/o5f7jMCx+lTkaLTy8DvHJwngcmh0xVhfSW8XRCoTRHi2OsEsrJiIv7F+htz/J4RbPaoI2hqiumeoLuNKiA0oIipp/2NuaXSBlQOjCqC6x3lGVJ4So612HF4nyPKIkTNypWRDyeEFT0mQRBBUltoSg+vJy0TEClxNoQJ2hUrNigIOgY8Ga0RlD03tJ0DSNTYQI4awnKsjEaJR7rWi7qi4xnY/RC43zAFIr5aMYkjFmu1yyWx3RdQ287+saC0ejCYIyhKDTWeZQSggzzRiSXrN79VoYNx0oQFTNfgvJoreKmZ9thTBGnokThOovRMC4NzsPh3pxbxwuaNpllc8psJpP5AjwngfLYY4/xvve9j//0n/4Te3t7O8/I/v4+4/GY/f193va2t/HOd76Tw8ND5vM573jHO7h69Sqvf/3rAfiWb/kWHn30Ub77u7+bn/zJn+TatWv88A//MI899lgWIZlnQcUR3FQ9mU3GXJwf8OKDi9yjp9ReU5iCem+PzXKJ1XDfS1/MqNS0qyVFqKlGY47NH
dobPaUpQNfM53tUdclqs+D4+BgfAiITAJp+iwsVAljv6a2j7jo67+i9ZWsbrOtSRLyN1QQBJ4LXKaTMxzoEeqg+cGovTxzxdT5gfcADQcddgoNQUUaDAqcDrWvZtiXaxrHk8WgUfTDeEcRjsezP55zvz/PE//nDOBXUbOmsTaFqQl1XdK5JIS2KEBwBhxiGGeg0Bk06YX36jwBSuqwoAa0wZRErKCGKuqKoKIoS630UWiJIY6lFsW43HI4nHO7P2bYt1sW9Q14pshklk8n8cTwngfLzP//zAPzlv/yX77r/F37hF/hbf+tvAfDTP/3TaK1585vfTNd1vPGNb+Tnfu7ndscaY3j/+9/P933f93H16lWm0ylvfetb+bEf+7Ev7ZNkvuJQu1ts0ZTGMB2NuGfvgJfML3BvtcdhOUYHYXN0xLptmJ8/ZDybcXz9aY5v3aDv4xhx17TUkxGh8XjrwHnWbUNZGC4dXqAoC5TWLFdLxl2JLhRKK3rn2PY9q+2a9balcT2qMPS2prdtSqnt0h6eWEHxJA+Lip8iek7UTgiAREETorCJEz4qRudr2T1X6bj9uPMdy7VH21Tf0IGyLahmJU3X8kef/SOMGGzbsm03eG9RKqbaRoHVIzqgtaEsCmSYTAqe4ANBRZ/MrukiJ201kdja2j00pMQSg+hMivAXTj8n3adA2p4RwrgQLh2cY7ne0PUxzVbS62Q/SiaTeTa+pByU54ucg/LVwXDBM1pTVxUH+3Nedv/9/MWXfA1/6dLLeWR6mcPJHq7p6LYNXoGqCtq+pdksabcbUIrJdEJVVWyWcdtv324JITCdjjh/4ZDZbEbX99y+c4fj4yMa21BWJaYwtF3Ltu9Y24Z109BaR9P1NH3LttvS9S2tbWnF0inBFxqv4kTMEHEvEHf14FEi+ADOx6A3n4y2nlg1KcuSsS6p0YxEMVElczOmcoYiKJRAWRYczA+4fP4K+/M9Nps12/WGrmvBO5y3pP4TfdfRtA2dt7HdxOB9iSPOIrEiEkQwZiinkKqZiqZt47NSmB3ECSdtDEqDQaGNoSiK3fPjygBDORoBEFAcK+F4ZvjDO7d44tq1mI3S98k0+4L7T1Amk/kS+dPkoORdPJkzTYwNif6T6XjM5YPzPLh/gYujOXVVpWwOhzgffRtNj7IW28TR9OlsRghCs2lwfUxcnR+epygL6qrAFJrNckXbtND2jHWBKmtECcF5jCimRR03+mpDbx19aVltFKWGhRKk0ATbgrf0REHikzARFKKiQEEUQaB30a8yxMYOSauiAjZ4Cm0otCYkQRLdIApNHLPWKc9FAcvjJX3fsj+f0/U1i6M70cYaPATB2h7rYyquJ6XHiuB83L8jKbBNgqAqRVVHUaGUxpiCqvRY7yDt5Rn+VEIQlChQAVB4FV9raAMJUIigdNxbtKcM1imunDvHcrul7Xuc80iI55MlSiaTeSZZoGTONkolj0PBdDzh4nyfS+N95tWUejTBWIdL0e3BWhCw6y2269jbm1GUJcujJXUdA9om9QGjUUVhNN719G1L02xxnUV5oUBhROG8YJSmKEcEEUZasT+do4zBe8et49vcXh0z7scs+gbTbGi6BmU7AoFeDVUTTqooKJz39OGkdRUng2IrhRBXQVgTqJDdnK8a8uMkYMqKvdkelSnw3qEl7r9ar1fxuX1L0zV471FK4ZzDOosnpPNJFRuJxuCTWZooaorSUBQlTdPskl+VilujtdZY6/ESXycuJRyefSImdyPYzlEURdx/pGDUe/b3ai4fHrLabOj7mGybVj3/Of5DlclkXghkgZI5swweFKM0VVkyH404nOxxMJkxNiXaC67vKZTCEbNMnHW0fUtVlZw7d8h6tWY0qmKbqDSUpqAwhtVqQdds6ZsGa22sUmiNBEEHRYFGF8VuOWBVV4wmE0xZYr2jqkacv3CRtW148sZTjDc1y2bLstmgXIuXDqssftdWibkmXsUtxxCFRxDwAqDRouLkT/CIKVFKU2pNoTSFUmgTdwA16w1u27It15RFQd/32L7fbQ4mBMSnCSGXKidKpfC6U/+TmM+ikhhywbJpVozqMUprmqZDa0NVlXgbMFWBTo0iLye6QpBddH78c4v/C87jAphCo1XBGEXb9Jzf2+P6dMJmu005NJLbPJlM5vPIAiVzdhk8KMYwqUdcmO9z7/45DqZTtDL4ricEjws9XXA0fUu7brASjaybzQZvHcZoxnWJUbA3mwKKa9c+y3qxpDQGBXGqZEg7Bcp62AkVR3JRGmsdzgrOeSo9YjqbM/UtYgN74ym3NwuqVYHeaHwftx03WKyK1QuvwGvwRqFS+ybEwslJLohI9GUUgaoqmUnNTI8otWFwq6pUnWidp00/i6TxZh0FgneO3js8xMkcLbuAtGFk2AzjzkkwhRDo+xiZr1UZNym74XvxeO8xhUETqyQhiZPhvT8PEZBAcCDi4ph445jUBRf291ltNnHSyAckVV0ymUxmIAuUzJllqGoURcHedMI95w65f/+Q8+WIqVcEI5iyQoynRNisN9hgGc9HXL5yBYXi6M4d6mrEZFTGFkmheeLJJ1iuV5RGU49GaKVo2xZvXUyP1XonFKpRwWQyYe/ggOlsRt87bt++Q9M0tMstDs+4GMPUYOqa0WTKQddyfX3M06sjXLPAK3sSf6ZOFvENrRsFacw3TfT46B+ZFBV7eszMjKhCgUqx+CHdJAy1mDSREzzBCb3tccHj09iwIIiLVlwfAqLAlCVFYVAq+kl88GilY3XEBRwWRKdAe4/SKplqAzql3erU2IltHaKAUsRNx0lcMogY70GEkYG26bg4n3O8XrNtoiAKabopa5RMJjOQBUrmzKK0xpjU3pntcengHBdHe8xUSS0Kp6CoS+rKsA1rvO0pSmEyqiiNYr1esz+fYl3H09c+R9d1bJuWO8fH1GXFi176UsajMV3bcu1zT4HWjKqKru9Zr9YYHUeb3bZj6e/QrDdRmDRtrCIkY2thDHv1lFqPmfo5fQhUx1PK0YTyuOb25oil3eJDDDEjCCpEY+rQlokeFfDBU6AYU3Blfo6L5ZSxGAoxqBA9Jc5a2u0WJz6N/QbEWyREn4oPKQVWK0qt46I/gZAkRaykBJQqYyVKHEj8nouyYLPdJvNqOJX6Gg0xQhxfjruKonwxOoa9eOdiXH9ZMa5qvAScc6lNpiHEMyianv39Q65cOM/RcknbxWkeP4TFZDKZDFmgZM4wWikKUzAe1Zyb73F5fo59U2NcTGnVQejXG/q+4/jmEc52mErhneXO8W3mBwfMzx2wOLrN9umOp59+iuADxhSc299nMp5w7fo1NusNfddxeHAQPR1HR7FaEAKutxhRKBfQqsBtW1zTUFUV41HN4bk55WxEj2PdN1ilWG4b1qstFyaBSTVm/3iPp49ucHt7zEYsIiHGzA/TOxIrEaLiFEwtmvv2z/Piy/dx3owoPIzLmv35ARLiaoinn/ocm22IF/8QkOCjEdYPttc0+bQ3o+961s0WJTEczYcYc++cQ2swukArwfvoyNWqiCF0
BHTavixpM7SWKHwEdruR6npEYTSb5ToalgWcNgRN8qYkn4tRGAUjCYT1lvsvXODazVusNlt6Z1GS2zyZTOaELFAyZxatNaYwjEYjDvb2uDQ7x9TUiBOarsM3LV27ZrNc0TcdCk9dj9k/t4+pa/YuXqC3Hf/793+Pzz35BH3TcunwIvO9PZrthv/7//cJ2rZFgjAZj/HiaTYNq+2G1vXx4qqjT2Oz3XDzzq1kkK2Yzmbce/99HFy5yLbbstrEltFob85i07A6WmOU4t7pmAvzAy4enOPJ69f4o1tPsZIt22BT9QQ0MTU2AJXSvPTeB3jto6/i6x56KWbV0i83KC9UQWjantB2KOJCRAg4L2k/jk/fXBQa3nu2m5j5susnMUza6GjIlUBZVxSFoWnb3dZwrRWKGF0vXnbTOTEZN4mO5A/yIU4MKaOR5FlpthtMVWLKMr5piEFxQYRRUdH3DuktB/M9rt++Q9t1saJErqFkMplIFiiZM4vWsYIyqioOpntcnJ+j0iXihK7v6LZL1sdH+LYHI9TTmv2DGfP9PTrnaddrfv/3f5fPPflHuL6jMppms6LbrGi7FhdCmowBH3paG8dz2zRiC9C0G45VzB5RxnDu/CGj8Yjp4QyvPDduPk2QwGhcM6vGrFZbQtNxeW+Peik0jWUcDA+cv8zh/IDzB+d4+vgWnzu6zqZt6XsbFxlqxeE9l/maRx/hL3/9a7nPTNhz0IcFq02Dc5bl4ojVek3TruMyPxMrIR6Hx6IKjToZC0Ip6F0fB4tF0oixJ6T5qDhC7DHeIniCBCCAUmmzscb7WO1BK5SOi/6CJMGiFS4Eeu8gtOikLIIEJIDxBcGEnZBRKBAdTcEi9Jst+9Mp4/GIddPgfXzvbETJZDKQBUrmDKOVpiwK9qZTLs/PMS/HaK/BO/p2y3q1JFiHGWlGs5rJdMp8f07bblmsNjx14wb/5w9+l9B3FCYlq/YNzlqcc9TjEahoMG2tZdttsemx4a/xCsDDZDLh3vuucPmeK6xWK5abFZaYzjqdzajHc5y1bNcbnPVMyhpb1eheMaoqqr0p62bL6KLhnnMXeMm9D7LpWgCqUc09Dz3Ao9/wf/Gil7yIc2UJN4753Kc+w7ZvwQAGLA4KMGODcgpvhUAgKA86VkmqusJJFA1REIQ43aPAaEVIo8wiMT2W4Nk0cbuxVnEhUFEUMfRO9KkdPJwEuqiTqSPrLT5I8peoXa5KQFGFgApCUEJd12k/siQvrSJsW/bOzZlNJhwvY46L9z5XUDKZDJAFSuYMU5QFo7piPplyaTJHtT3Og91sWdy5g992FLXi0v2XKApD37U8/dTn2G4bnnj6aW7cukkhUBdxoNZ7j+16XN9T1TXz/X3armO1WmGtpbc93vs4eZL+Fm+GSLXKsHewx3K9pLc948mYzvZUdcVkOqbrW5bLFS44tImVh7oq0QpUWRJQqHLEeF6xbDbsT6aUo5q9+Zz5+XNcfuh+7n3Fw1TzMYUExHquvOgBnuwall2DE00IOhpdlUF5Q9gqnAhBBZQBHwLGBFRBbJeE1DBRKaNkt9xInYiAZID1Pk79aA06jQdLSMekNpcXh6LYPT8dtptACrsnpDRdEXQIqDQtpBVY55GU6aKDgPfMp1OqqsRaB8rlCkomkwGyQMmcYaqiYDIaMR+NGHuhdJ7QNyxu38JvOgievXOHjPamLI7u0LcdTzzxBIvlktvHCwhCPRpR6gLvLa632D56P4IPHN85wgeP6y29s9hhgV0IiPdxcnZY4lc6Ot0RglBPaqbzKZtmy2Q2RhmF8xZTapx0sUJgDFWpKUzFaDalcZa6ArShriqKcY1oxWgy5nC+z7wqKcRTT2vEWRqxTC/OudhcZtUcI8ZRliVbaVBKMSpHbHwXKw7GI4XggseFDqcDFB5ciqOXOEbsQxQNgk5x9JL2HRmMjhNCAXASMEgSHAoIBAlRZInBqCiSgiiE+Hy04J0lBJ928sSdP6RX6J1D6zTBJKAFKq1prWc2HlGXFa3pTvJgMpnMVz1ZoGTOLEppJqMRF2Zz9ikI6w3Low121WAkJsyO92ZY72Isuwi271ksFiiBcT2iLEuUCvTblu16jfOewhRY+hiznlJibXAEF+PfvXfxIoqKuR+FwYw0t7a3mU1mGDS37tykqAqWmwW3jm9x/vCQo6M7bJsNk8mUupxhlaB0gScgKlYzqqLkgftfwnhvj6CgqktGkxJXeZTaYm2Bb1p6v6b1ltHFPS519/L0k08SOlC9wYdAWRYUlUY7FUPnBZwNWGvRKEydpoLCUCRRqaKiYigayQCsU2YJZjfu7AEJAZTZjRgHH2IOi3gUZRQpgPcxN6aqanoguIDSmhACnbVoNBpBp5UFQyHHoKm0pveeWT2irkoKY8j6JJPJDGSBkjmzaKUY1zXnJjP2KHDLNWrbohCM0ShgcXxEa1uCd1z/3NN87qmnAKjLmrIwKC1sN1varo/mT613o70CzKYztFYcLRaQNic7F6sV46qKtlLtEGC5WtKLZVZNcVuLbjXNzS3GFCxWR7RtSz2qKL1B+xJT1IhSLFdLlImVhqosqapYXQlaqCpNUXjasMT3HtX2+L6nUSsW22OM1YzmU1RlWK2OaXGs+oZ+2+CCBw2qNuALlHcxkwSFENAFhF5Se0Wjibt1kGFzTmSXPqKSEFEelCZ6d5OAER0zVnxAp5wVY8zuu1SK1B5TaG3onafzcXS4UhV7sxFd30VzrxggjSz3jrIsmI3HrDfb6IPBk8lkMlmgZM4sRWEYlyXzqmamCyqxeK0RA0VRELRju15z6+gmy6NjlsfHONtRFgWlVsxmY2ywrG+uUSIUZZG8GamsoBTOO5pNQ9u3iNHRixE8VV0StGC9JYin6RpG1RTb9Sx6hxZFs9rS9S31aMSmW4EoeqnQtaInMJkcYEyFDU1arKfZNoEnP/skB+fPMdmb0vsS4wNbtSasGkbjfTTgjKVxW9pVw0g0XeVYuQ2bbo3D0ntL73q8BJQSilFFKQLK4l3AO6KXRkfD6mDrSDsJCcRFgSqZSeKyQhmW6yDKR3+JTtWPmI8PnGS3KKOoy5pts6XpWwKC84Jru1iBUhobBOViewht8CGmyuqQtjKj2LQds9E4iatMJpOJZIGSOZMooCoLZmXFubJmrxoR1j3z6QwdNIvVMU7HSPh+07A6OsbaNiakikbh2G4WbLqG3nUUymCKIl5jiX4LJUJnezabDa3tY8w8QlEaWtchymNKQ1DQ9C26L/Gdp+8tZVngnKWqS6z0dC5QqAKCYmM7SjzKaWo9RnSLV7DdNlRFTWfXiHJY3yEaemlZy5KqH3Ou7CnqOvo5sLTdinXX04UOh42TQ0bwheCsjxM8EitDFNEjQgBRcQdOQOGJBlYf4s+B6A9RRC9ITLSNCwZ3X74ISoWocNLyv+hZEVTa6RNCwAWL3+WvxFHkEGJYWxwZ1jgvLFdriqpCKUPwDmU92mgKBdiO8dDi+XP+5yyTyZxdskDJnE2UYlzVHNZjDnWFOE/bbBnVmrbt2NgV23XDdrXm6NY
ddJosCSFuNW7bLaEJbJ2lMAVlUVJWJbbvccHSW0e/tWBU2kMDIWVwePE0vQVq9sbTODkjgabZICoeX5SGyWyMcw7rLdZbjNaMiwm60PTB0dqGzja7hFhrO6ztKIotxQjEOLrOsunWtHqLcgWNbBnPpvgQ6NYtve3Ybjf4ztL7Hicx4TVoQdL+QPEB52MaCoUCl5b5AU5rXIitnbiB+EQCBAmxnHKq5TUgqcIyGlWUZUXbNgQ/CBNHcI5OPF5c/P60iX4TrYCUxzK8liistWA01aiGoBDvMQoKLZRBqFI7T+cqSiaTSWSBkjmzjMqSc6MJUwyb1Yr1coUvO44XR6zWS5arJV3X4ZxjNpvGjcGQxEyDC45gFPvzA8bjCd45OtvTtjHvJAhIiJWBajRCE2KAm7OgYdt7ZBOoRiXGaLwXylGNOHC+xzdxNLm3PSioxxXW96jO4IJj06xRoqirmuADfW9RKJz13Aw3WXUbvBN6b/GFQ47B6R5Vl3gkRe07ms0W3zm60GPFE/POFGgdQ9hUnDTCC0E8oiRVTmLWSTS+qmjWFUkm2djN8QJa4gbDIcVVpQWAooim4iIuCMS7JFA83jq8F4KO/pMQQpoIUkmkxJsohdIaUQoXPCWkBYUKI4pCawqvMCJMR6MsUDKZzI4sUDJnlrIoOJjNGJmC1Z1jtpsN27Ck2a65c+cO1ls6a0FB6yybto1RYSGA93gVMLpkb77PeDzms599km27wTuLl3ByAS4LqnGJ7zrQASWCKU3c9msE7y2hAVVonO/pui6mpSpQRqckWE3f9TRdQ+ujYPEh4J2LrY4ApS4pdRUFg7M0PgbCOfFYOrzz1H6LmdRIai0F53DiCeLoxeFwMaJexRFor6Og0BhEufjRg+CHvTbJCyL4WCwhVke0MYhS2LScME4sRWGj0+SOCHR9h3U2PlPFySoACQ6liyhGUktIQhRLmhSLLyk/JcS2W2HSc5OpdojRr5XGeM9kNEriJpPJZLJAyZxhxnXNpCwJTcf69hHaB7zraNqGvu/pXY/zHlUYNtuGIMJkPKLvOqyPGR5VOWI0GrHZbNhsNngf2x0hBLz4XcvjeHGMCwFTKvZnc3rXA/Fi7ryLnlqncdYRJKSFeXFRn9IqmmADcerHxeeInLScoism5pUEEVZdg65LirKMi/lMjJr3hcfgo0DRCpUyW7y1MQ22KHbG3UA4CXdN/hkJEoVBSDtziI9rpdGaXZqrKCirCm2EvutTq+dk0eDJlI+Okz/Js6KCpLZYNAoTNKSiR5BTE0JBDZoGtN5NF3nvCc7FKaYQzbNaQLtoTDYqV1AymUwkC5TMmUQBk7pipDXtahmNlRLDw3yqB1jvccFTiCZ4iwDTvRlFVXF0u0cpjVIFx4tVHBFOY7BBwPqYeRKU4KXH+oAyGlPWhOT5DBIILnpSTNpz0we3G9tVRkevh4rjuEGE1WqLoOKUTBpr9jbgglDouBjQeU/XdfRdQzUeYYoodIKB0FgKE3Auvq5BxyDYIOhCUaoCUYLtolCKVQ/ihuQoWfCkVtfp0R2VWjciMebegy4No6pEnMeLTXkoENWKQrRO1ZSYGCsh4HVKj1UQrEOFuCQwvnYUJYJE8RJj3OKGZB09OD44jIA3JrpcVBQuwXpUXaBNFiiZTCaSBUrmbKJgbzxCdx3NylIQCErQpUEUWO/jkjqtMFVJWRq6rmO93cSpFK3wQVhtNxwtj2MVJLUvBBU3AKsoQpwASqNNQRDFZtugC8EUBqWjT0LpZDCVgJcQl+kpTSC1UURQQfB9NKNa5ynLgul4AkDbtwQVc0R8iIFqUuhYxUkZI+IVofU45ZFaCBrEVIiPe26KNBod2yASKxukKk8Iu6pJ3M8THxkeHyoiKbgfLZq+tUjhITg0krYUpxaPkuRvGV5BUhvHgSrjKweL8lCWVRQpu7ZSSHuAYn6KEk9dlTS9w3uH1gUheASdwttOhFT2oGQymYEsUDJnEoVir66Rtsd2lmAD3jqKIgqRpmvpXM94PGE6mzCbTTk6OmaxOIa0dyd4oUewvUUbTWkKlNYoZdCFSeKAeGE0Ok6ipJh2EYuEaA5VRmEKTT0as9m2tH2zywLxw8XYSNqKHKsl1jkAbOkgBLz3uCBgbTyGALoA71EqELxHRKFVAUEovKIYFxDihE6sLCh8Z8HFMWkJQxQ9+JT0OrSvhuLJIEtUqqCgAM8uC6bv4/kNY8HR6EpMvlUQCFFwpK6PQkcjrqg00g0Gh1YVqViSKjdC3IbsKDV0zTaKJwkEo5D0PQ9/1hoheJ+C2jKZTCYLlMwZZlaUlEQj52azwfU9IQQ26xXW91hxFGIZ701RRYFTQjAao2PMve06gg8EE30ipiiYTuI0T+9itL3z8UI8KSt0UVCWJc4JzsW2RFXV6FLjg0/L8WLbR+JVnqBj5D4qVkGc9zGMTCm8BJqmgSB459PkTNqZIwGRgDEKrcyuehFswAeH6IqyLMFocILrffSbeEEHtRsLju8ZYoskQJAYNCchtVxOcmKHnLUTg2qIkW3DESezN0nIJJERF/95NCmoLZCUSPw8NjiwiqIoYztq94ohtpNCIHQdVNVOBHoXhU9V6hi14kGGdLlMJpMhC5TMGUUpxbis0J44GpymSVzf0fsOi0cVKgag2Q7tLdNzc7a+Z1RVrBcrtFaM63EyYxKrJ9pgfR8Dy5SK2SBBsM5TGYPtLUEEhUbrgqoa48VhbU/brnHOgxec9/HCWhsoVBQcQfBB0IWmBPqux6PidmClEFF4LzifDLoS8J2i0DWiQXtQdsgNcbSdRRkdR4VdkhoBnHVI7OHE1wo+VmDcYIE9ERtDpWcgbiUWRMXXBdCnFIpPDhpCirnXEpcBKh0FTZAT7UIcdx7EmHIeTdxUHKVNPND7gFZgRNCqQCuD9x4VjS0ECXFfT/qzyGQyGcgCJXOGEQm0TUMJcSGg8zjv6Z2LLQhRzPZmgGLbtuyfO4gipChYN1vuveceSm1YNw14TwjJnNrHDJRAHJsVJI7SKihMrGZorShMQd/1bLstbd/uDKCSBI8Mx6uAxSFpYaH44XwtpTY4SVWHNBETQjLoBqHQHm8sQStQHl0ajBSIVljj0MZEv4uXEyHi43PFC8EL4kL8NYAShaZIk83xvU4TDcCC0oIZREoIp1fz7ARNAArRoFMuitbgY1T9jmQEHpJmtdapopSyUSRWm8QD3se4+74HCYiJXhyrYwspOH/XeWQyma9uskDJnFmcc3SdwyQPh/cubus1hkld0dueQhsWR0e0XUvfxR0wiztHAKzXa2xvsc6iJY3hppYDSnaVFZTCmIL5fA8Roes6wMf37Dxd1+NciEbZoXSgVGyZBBBPEhwQU9Si+tGYGGbmPUNcfEh6IB4iBBsXBgaRNIOjcGKpdQ1GI04IOqa1EmIcvZL0WmmkuNQVLvTx86PQYkCGWPtkXEUhPu7b0UrHCR2tCV4IEkPeREnaXpw+Yso2MSnnJY5kxyoRxJFqNbSPkselLMvk81FxOaBEY3H84LFqEkJIsflx+zE62nFC8LnDk8
lkdmSBkjmztG1H1zvYOLzzu/TXuq6pqrgLZ3F8nCoiAVMUzOdzVscLVBA2q3WaKpEYMJambpSKF3hlom8EoKwrJpNJEkVNqqq4aOz0pPFkBWpoo0SnRbRjROEh6FPiRMffeo/37C78sYIyOD/AO8GoIlZHQsxCERTOeaSJF3FdRENqCAG8YLRO+3NivolCx9aPE3QgRevHNksQRbKaID5+D7EFE5VAoRVeGbyKm4eH+3WKuo/VlSIu/xtGg3X6z0ZwBIkZtUI0H5dVFQ28xPaTUsQQOARnXWwV6TiNJChUiB4eFUKqzGSFkslkIs/JMv/zP//zvPrVr2Y+nzOfz7l69Sof+MAHdo+3bctjjz3G+fPnmc1mvPnNb+b69et3vcYTTzzBm970JiaTCZcuXeIHf/AHcWniIZMZEBH6vqNtW9q2wXlHUZQAeBdompa9vTmFKVAqXtD7ruf27dv43hJCoO/6VC1R+BBTQtKa3RhclqZWirJgOp3E8Dbv0TqZT52NomjYfsyQxDqM83KSRRKLGTtfR1GUkBbqDbe4SfmkxTO8FqRWUYh+jWADofe0bUe7bem3FrGA0yBF9LE4n6oTxB05JLGiB1PryTkNjhRtDFrrGIzmFfhYyTCFxhTFXf81iLouGnl9cPhgEW937aA49aRPjo0RdXRtS28tTdfigo/nkRLbfPBxLUD6jnZbkUme26G0lMlkMjxHgXL//ffzEz/xE3z84x/nf/7P/8k3fdM38df/+l/nU5/6FAA/8AM/wH/+z/+ZX/qlX+LDH/4wTz31FN/xHd+xe773nje96U30fc9HPvIR/s2/+Tf863/9r/nRH/3RL++nynxFIMmnES/mwngyQmlF1/eIwHx/H6UVdVUxmYwZjWpsFxNgtY5BamnNzC5xVYwGo5B0gS3Kkr29GfWoxqU2S7SL6N3OGrRKeR3pb/46LuLzqV2zc3lI9LSURYVSGmt9nLAJcuoWc1C891jrcNbFJFeJplSjVPTLdBZlA8FF30zwEls7EtsiPqS4+BBNpkpFc2rcd+iR4JPAiKO9pHh5RRQKzrm4TyeJOWN0HLE+vUzwJBCWIDFrxXlH8C76RSTWkYwyGAxBAts+pvw669BAaYr4uX08Bx9CNBQPMm8Xx6+oirjROJPJZACUyJf2V5bDw0N+6qd+iu/8zu/k4sWLvO997+M7v/M7Afid3/kdXvnKV/L444/z+te/ng984AN827d9G0899RSXL18G4L3vfS8/9EM/xM2bN6mq6k/1nsvlkv39/S/ltDNnHKM1f/v/+//hylFHue4Q6zBa07XtLmOkLEskZWcUZcn+uQNu376N63vQGht82rSr0ckfMZ5MKMqS1XJJ2zQoFLO9KaYw8WIfPNvtFmN0vPAbjS4ULrhYhZGYjeIlVim0Bl0UGHWy+2ZUjQjBs1qtYttml1eiY1CbxIqHCyFOGk1HVOMK6x0eT1BCUZVoo3ESsBLYm83ixVtIFRkfj3UBcdA1Htt3cXS6dzGCPhh8iG0mnURAzG6RXaVHVGw/mVR5CV52bTFUatEYk0oxcc+OCic7eVCKuq5BQZOMxDoMj8F4MqZtu/jpdTTPqrLAlGYnIikNnYGbheP/OTrmiRs3/3z/YctkMn/uLBYL5vP5n3jMF52K5L3nF3/xF9lsNly9epWPf/zjWGt5wxvesDvmkUce4cEHH+Txxx8H4PHHH+dVr3rVTpwAvPGNb2S5XO6qMM9G13Usl8u7bpmvAlILJqRWQNu2MWfExwqAczF2fmjDtG0LQFEUnDt3jrquTyoXIpiiYDKdMtmbQWEQHSdQus6mVlLHdtsxRNWjFFVVUlVlOo/Y6plMptRVDajoO0Gl+PjYetk2DU3T4n3MAUHptL8wVk7EDzWX2PLpe0vXdrjeEpzHOYt4n1ofgvSWbtvinY8mV6Jvw6CTt2bILRnKRBoJJ99bCIIP4H38NQyeVUkR9j4+JqcGfk63n9IfBlrHEWGtNEqGZNl4U0m8IOpUngrxnNOfZeobxd1CafIoVlf8SQsst3gymUziOZtkP/nJT3L16lXatmU2m/HLv/zLPProo3ziE5+gqioODg7uOv7y5ctcu3YNgGvXrt0lTobHh8f+ON7znvfw7ne/+7meauYFTu8cHtltJx48IkEErRTeOULai4OCpmlwzmGIS+n6tospqTpmeHgfKyq22aKNZjQZoz1453A2Vjm896CEsqowadeOdS4Ki7TTpmkbvJfBfLHzYPjhQkvYGWyVisbZkEaNh7Ffk847pNh7kk9DDDgcRVFQ6TqaVZWiaxoInno83m0b1hQofMwtQQM67cOxSQukyaJnu+YPYSm7ULaTu+8+LC4eVEnMGaUI1p3koBAF1rCFeJeRkrYZO+uTP4bd+6gQkKAJKqA9sZ3mA6JDFiiZTGbHc66gvOIVr+ATn/gEH/3oR/m+7/s+3vrWt/LpT3/6z+LcdrzrXe9isVjsbk8++eSf6ftlzgbbvqcfskU4yedQsFvEh1ZM53vMDw52kzLOe5bHx3TbZlcqGERK33bYzqKVZjbbYzybgpAqMnYnOExZoIzBBb8TPkFihaRp2xPzLNFMavs+CpmQFvZJQNTgVZHkgRli6FPFQUePS3AhLv9zguscwYf4fj7sPpO1Pe22oW9beuvoekdvB6OsRokmJsep9BmiwRUJu/f2pFHr3TesTltOUiaKnPhURUNIU0CiMEZjtEn7jKLCiWFwcZpHq2Grs6QUW50qKqd8LcJJZSeZiMWnLcxDCFwmk8nwRVRQqqripS99KQCvec1r+NjHPsY/+2f/jO/6ru+i73uOj4/vqqJcv36dK1euAHDlyhV+67d+667XG6Z8hmOejbquY58781WFD0LvHHUSIzqJDFEnhtVA3KNTj2oWSzmpZPhk4oxFjhP/hfMorSiNoSxLVu0KJx4lClOY2JJIU0GmKnbtIZ+2BqcVNVEsGSiqGO/eit0JD4G468d5RMnOoOvTjhytFUrrKAaSrwXAiIn7eMQQfMDannI0oigKXG+jsbbrKbxKY70hfie7v2ecTBoxnKMijQ8/g2EqSU4dOzz71MFDoUUk0LYdhYpG3bveJx0Tz9PtRrdPzunudxfiFxkFG2jn4p9pIc88NJPJfBXzJW/mCiHQdR2vec1rKMuSD33oQ7vHPvOZz/DEE09w9epVAK5evconP/lJbty4sTvmgx/8IPP5nEcfffRLPZXMVxACOBG6tLFYlwWqMJiqRBcx7dWmtkvTdSxWK4qyZDSepCmbOE48bOeNMfGCtX0Me9Oa/f05e3t7BBGKsmI2m6OUJviYuSI+igdRmpBaFqnIkDyvcXzZpuoKSZwIKo7+SqwxDN6KnVFUwPY9fd/j/dAuiaJFK40KJ6/rU8hbEFL1potmWGvx1hKGpYGpt+L9M0Z1FSgtu2V/u/ue5XYXdz2YjLkSTqoezxASIimCHzkRZUrS0sFnr4sI8RjPUFHJ+iSTyZzwnCoo73rXu/jWb/1WHnzwQVarFe973/v49V//dX71V3+V/f193va2t/HOd76Tw8ND5
vM573jHO7h69Sqvf/3rAfiWb/kWHn30Ub77u7+bn/zJn+TatWv88A//MI899liukGTuRoRF03ABhRQaLQqxoIxBhTgJE5Lpsula2r6DIMxms/iYGnbvRIGiBSBWQ8qyiNkoPuC9oyhLyqKiqEqMUljvCU7RNR3WO+LaG51SZ1NzQ8d2Spf8GAEd02m9B3GEtIRQJVOqeNnNOgdiYJv3QghxU7IyGlUY8EkMOGJ6bedQWhOCSkFsHmcEYySNDisEFwPYtEIZwKndiHHklDJJFZLdI8NDn6cMdiaVJEaiwPLDBuPUXjvtRbHBpekfUHhQcXpKA0ab9J17TqslpdRujNsHh/U5EymTyUSek0C5ceMG3/M938PTTz/N/v4+r371q/nVX/1V/spf+SsA/PRP/zRaa9785jfTdR1vfOMb+bmf+7nd840xvP/97+f7vu/7uHr1KtPplLe+9a382I/92Jf3U2Ve8Aiw7TucHuOJqaNRkITdmKwgaGN29yOwaZvo/xDBECsWQwUFiTHve3t7KGNoupamaREfcDiOFwts3xPwMVBNdBI7J5UEdPKOpPcfdgFLWhEsxCoLKraklICTsEtzVTKUK8Iu6CwIqCAxgVWrk7aSCxRGYkuqrFKibcxV0epkVNjbOG5stMEUiuB0DHJL3+XQ+Bk6L3ctD0wtK6UHP8nJ/UJI2S7DfuJUiRnERewfxUpI8tYMr2x2GkTQ2lAUBdoU+K6JEf5mEDdq9/o+fbZMJpOBL0MOyvNBzkH5ykcpxWtf8mK+ZjTnUusZB2g2mzh+q2ILxQePKQuKoqTr2t3kioSAQVFoQ1GksVjiY8oYzl+6xLnz57l1+xaLO9FM6yXEqSDfowuFLkqUVrjgCUqlloUCLcN1dectgTiqG5Khdyg+FEWBRtF3Hba3EGI1R7SKdtWUhzK8nikL0CqGoYlgakM1qijKIpptfSC4uP9Gax131xA3HwcfKIpYGQpO6FsbM1KS6VWSYZXh/U5KH8RdRFFRxG3Cqd2SWkeK1CZTMbY+GpSLVEWSXQz/6e3IRVGjTMyWqcoS7zzGaJz3BJMWEKYKlzKaXgeuGcfvrJbcXOQYgUzmK50/TQ5K3sWTObN4hE7B2vYYXdD7OEJcmmhAjcJDMZ6MsbaPFYiUzYFWlGXBfL7PdrVOF9F4Me22HUu9ZH20wvUuJrQGSeO6IKLiBE7av5PW60RRYZLHAlBG472Po8RedtMpEkeOYprqqQA3SdM8MSIfYn7Kqch7P7RfYiIsXuGtYIqhshHj6iHmi4SgUpqsAmXY7QJSpOA5CMHhJZVvoiHl875nQTA6jlX3fZpkSmc1fMcxkyW2YtAxjTekkSfZTQdFgRJUDLYzKhmWJaBQsT2EQkKIjR914tvxIeDwOek+k8nsyAIlc0YROh9otFAFxygITgLGFIzGY9qmpSiKXQrq3b6G6JeI+2piW8L7YbTXUJiSzXKN7yxiY1UieE/YrfBT+BAvuC79KmmnjEajSo2P6Wa43sUcltN5IhKFUBzrjZWVnbP2rswRlS7r6UId4u8C8eMErwi9j94UlSpAKfAsBNJEkj7ZsZN2DqE9RRn9H10IsfKSxIMi3BVKMtwrKomMIaskiZnB1KtkiKRT6KJAGZVeN2qioE7aQ1rF1+udTb4Th9YGg45vnQTcaYLISTptJpPJkAVK5qwi0DpHV2rsuGS96lASKEXiFM7uQh1YLlepkqGSaTR5KEKgaba7FkUQYTQeU9UVm9UK11m8dbtwtSAeZU68GEGdiJNhp41HKHRBXdX0zkbPhAoobXbPk52BN3o4huTYaFM5USiDGDotgCQkIaRN/DxC8mXE15OUn6L1MFkksbXDaW+JRpRHFVBWZfS0OB89JBCnkYZGT3qKcx6lTlJSoi84VktUkN2BimjiVUqfROI/+x8fIQSKMi43DLvR4yRCTj83jXDrMv/nKJPJnJD/ypI5kwjQdB0ra9mWhq0GqzReFF1n0UUZWw1KxW3YycwJpKyyk5h3pTUH585Rj0aIUjRNQ9d2WGcRFajqElMUsciRoumVTtt/zbCxN76e9z6NN2sUJgaP7aZdYsJtWVUgQ+5JSC2QuNdHmWiSVerE1wHJiJrGondemtRKIoDtHbb3aZlwgTEmjkQP6bppCeFux04gVVNAKbMLazvtm4nvO3R9Thb3QfTbKK3ueo4Q224++Lg3SMLu3PUubn/wAUVzb1GUcQqJ1NpSEqstiijG0ii3D+FZ20+ZTOarl/xXlsyZpbeWddtyMJnRAaOywqgCvIPBGaFOLpKDQBGiqTWEgLMOUylCqrD0fU/X9LE9oUAbxXhvQthuoFcwNDIkXlwrU7PdbJIIiP4O7z1N156q1KQ2jVaYomBvNuWOjRkmSmuUAWWgKEtcCpCLk0CplSJp2qgwMVhuaBH5eCEPvewqQ8EDhcLognqkaBuh6/tdsm48bhBMcT+Q94NxNz6mhjbLQBJDKn32+IUOoiS9WmoxMRQ+hBS5nw5XGqNVrK6kVpZC0fdd9Mbo6OuRu/6ckoclxEmnoix3k02ZTCaTBUrmzBJD2Hq6KWgEJ0ChCT7ZKJ6BThdxREfhosA7izeazXYbX9OBdSH97T+O8q43a4DkgVC7toxvLRi1C1lTyeCqAvjegRLMcCVPKWNeedbrbdzdA7s2SBBBlDs16aKTNyaee0ieD6WiWHHeIyGgC7Pz0hSFwVQakTjRo030pgQfazgqzlUnO46iKAxeAlqFuPdHDa0iuMsMA7vJo8//Qzj1+11lKk7zDGPUsXo1vHfMYBmWAwaRVBnSd1VZPu8ttKYaj+56u0wm89VNFiiZM0tvHeumYdG2mNKw6XqmRUk1XBgBkF2Oxng0wjtH1/XpGqwIROOohP5ke67cXUKwvY2ejxQrptExMdV5gpfdBVcrncLVhovqybmGJGJEPDbtyNHKUFXFLpk2OBcrGEOVIFU0RGL2ydCmUqZEBXDBpzRb2S0rLKtqt1k5WL/LVhlGgGP1h/hrEStBmN1XlQScnLTDODHM7ngW7cIpE6xKAsU8w0+zawsFdbI3Kd13Okz29MtHU7DgTBSXblBsmUzmq54sUDJnFussTdux2G7Zn+zRdj1b8RRDqwDiornk/ShMgRaF1x7nU6VCawJCYQr25zOWqxVt16fI+lhFkSBpZNeAxIu6QtDpmJjTptKYcvK+Ku4KFTtpWqTwtSAoJWhlMKXGWosPfmfulTT1S4ijteIFaz1FWcbnp0WDg0AJIZqD+66PU0MiUcCE+NnjaDW7SSC0oIbWjeHuMZu72JlgYBA5g5o4dfwwWRTFCSeVk13ui9rdd9qDopWOTSKJT1JotDZ3eVu8CF4rWttjnf1S/7HJZDJfIWSBkjmzOO9p+47j1ZqD8QRdl2xby0RXGIltn+Fv6t57VusVdVFhjNktrNt5U4ymHNXUfU/b210U/MlksCSDbLpj2DJ46vEh52TX72HIC2HnIwHurivcVS6ISwf1M1431lF0TLTVLrVJ5CRDJb1BCAHnLN6X8ZjB+0KcWPJO7fYPnSzsU2ijUe70
Ar8/hqHN8ydM5pxol4D3kqo2QNAxxC7lwKhUcTrxm5y8vikLnHe7NF4ngisM266LZtlMJpMhC5TMGSYEwTlP23Usmi2T0YRV0zFJ24dLFMYUaAPeOpxz1IUwmYxZrwdTq9pVP+4cHRN82Bk9d14Jle64yyMRx4fZJatGn4cu9G4PTwhhl0UyDKAMu392TlRR9NYSUpgaxCWIyp8IGy+kaZsYYiY6toqikTYaXyT5Y0QgBI8xRRIy6WVECMHHKhCCdw582gKNwZg48TM8QSScavOw+/y7KSA+3y4SJ4vi6PEgjIZAfa1OBJ5OAi5uWj6ZABqMviiF0YaQfDp98ISipnMuB7VlMpkdWaBkziySck6sc6ybFre3R5iOWW87jGhCEilKYgtCp2TX0WhM3/f01qYLcbygWhfj5oeJEwmBgD/VfohL8JSKQ7FC2IW+hTQGKxID3Yw2sRUkKUJex4u7GnwkKgaZ9b3FO7+Lmw9q8KucNmVIjLk3BhtsylHxEJPsUVphlMK5GLrmXHog5aVQaILz0bSaxqKdi+0h8YLWwrgeoZWm6bokZHSKrX/2713S/M4wQjwYkIfKyHAMRMPvzhQbYry+NtF7EpcKGjprd/6Xtm3jmLSO31vnPL4wbLfrnUDKZDKZPNOXObuktsrQ6ln3PfXhPk2h2Q5LA2GXkipK4RHuHN3BOnd3qyUZTb2PF/c4WcKpI1K1weiYP5J8FEMUvVIGrWNiK0kkxDaM3vkuBhfqML0jkhYADuewS7s95fuAuysZ6ThTFLtKzc7hMuSwpGqRCJRliSkM2hTsRq9PukK7X523cbxZqWcktv5xgiB+ZyqdnzGGIt1UqjRF/4umqipilUkgCNrEBNoh+Valzz2MK6u0vVgbgw2ertT0hChiskDJZDKJXEHJnFmGv3GHEGi7jtuLJXv1iHpcseo3TIsythIk7XdJYsIGP/RwkvFT0uQKoKGuasqyZHF8jPhBJ+h0MSVVRU6qBLuIe2NiVcf7aM5N/wvDVEzacuy8R8mwbfiUb+UuRXSSbjsIGe89AcEYTVEUSFpM6J3DB7/zpngf0EoojAGlCQQYWiY+fl9xj2DcoxMgJskG4t6e1L8ZckhOn5fRaSNR+i53abHxhxg2p07uCght37Pb66ygLIp43iEQnCOWgmRnltVap2qU0EvAV4Y2JfpmMpnMQBYomTPNEDFvrWO93XJzueDKbI+27OiUYqSL2FYJajfxouITdxdNL+GkVBgtFNR1rDyEcHKBHjJIlKg07jqkqwZEhXhxT34UEZO8LcO+n1Q10HHyR1TcHTQUTdSuCpJOI/kwToefeU5EDeJ3plvviSPQaUxGKY3SJvlRFOgCZSSOFCdBBxqtTu3nAU4sKCp+pt0JpRpPMthq4mQN6ZEYv++TxlKEXcJsFG8qeWUkvY8XiWKNwUQ8fGiiMEnP98AmWEIxoem6mP2SKyiZTCaRBUrmzCNB8CrQ9T2L9YZRWbE3Kjlad9SVZpwyUIcqhZy66EK8SEraSqyUYF3PZhNHjNOCHIZo/F2Sqds9GQSC8nHyR4NIHEne7f4RSWFpCl2Z5AMZJlpCbNUIEIa1gOxaJ8Pzh/cKMYUttk90wJhocB0qGZL8Kz4FuYlScYdNMuYOI9FxX8+zZJzc9cUO35Lsvrfd++ymk5JBOLWzHD6ZdmM7KYigTlU+7hpaGrpow2j1UHHyHmU0vQhbE3BGs111WJcFSiaTOSELlMyZR0ix9d7T9B2rtqGezujHsLaeyhQpdj2O1JKCzLQ2u5JFAETHOob3Htvb2AbZ/eU+XqC7rovv+YwLpQpC0EPXKE7EBMJurFbS5IwSD0ahBh8LKeE2VSzU8Hl8wOEIPr4G+mSBnzEFdV0lv4klhLDbVrxbhhgE0aTEWomG2CAYFT+tSMD5kwwXUty80kNbZ9gxdPItn3hn4uvf5VVRimS3ufu7OfXbnRghDkYZdSKQ4pbkVCFKn9cqQZVjOh/oXcyJyQIlk8kMZIGSeUEweBqsc2zblqUpmE5mLFcbau+YBEWtNaYsot/CO06SQIZXkNTRkF0i7O4Ce7r68iwXyaFtNBlPsU7YbruUpULyc8RyQRBJrSZHVZZA3AM05KwohpwSOREnKi0mTKZfJwHftihiFswwQXPaoqFU9HQE506EVgpaE/zu3JTRGFMQMDF11pDU2jNEhgKtVUrlHULUwk50xFbbIK/Sk5IwI1VslD7ZBwQpbdZEY7H1LvltYgidB7beo2ZT2maDcz5tlc5kMplIFiiZFwY7M6mn7XpWumFS15TzKcvlhlI0RRjiWdnF30cTbbzbOxdbPV5F78bnVQK+8OXR2h4fNKcv1CIn/g0JcTy5qipGoxHbzTZlqKSk1TTFQpATcaJj5UeLQadKy9BuMacu+LuJovgJd9WYnYEVFQ2zpPchdX60TgH+J56ewX8yvIZWOrWFYpkoQEyJPfV+pytAhBPfikqTQVqfTO+IxD1FulDRzAv0QzibUjTe0RcapxRN1+FDyq3JEiWTySTymHHmBUG0cAg+BDrbs21bjtYrNlrRjmuWwWE1OAKiBz+GOhmRLUwcCUaliPuTC/mzvRckw6zWgE67ABXWe5x3sb2j2N2GCsTwos4FmqbDBxDRCDpO06jo3xjyYyWdiU8jupJuw8iw4uRz3IU6+WV4ZPC8xKJG2iAs7Co4cZQ6VjROLKw7Z0iK1I/j24N4O3nvdN8gltLUkhr8JUk8kb6POJEU/8zcLhBP4RB6Ag2C3puwaDa0XYfzJ+2yTCaTgVxBybyQkJPgNoDlRlGZgtHBOTrrWTcWpRS1MvECPezKGTJPVJyw8cQpG6UkjhQPDOOzIaasllVJsBbxlkBcPBg7NYIudMxhSYJCyVBdCKkVo0EzNElSNSe+l0YRnmGO1SHG3Q+j0rtliGnKZ6hcDOZafcoQOxhdY3YLu3HeYXIoSECHmExrCoNTaaRXoslWVGy7aNSp6Z2TGPu48C+NLEvYCTJTlCgFzrpdNWYQLGiNMgqMxqeWUdCANlit8KOKXsOmbbHex+WHWZ1kMplTZIGSecEwVFFiWcKxpePOesWorJgc7LP2R2jrEBWoGTJNnvkaISZymBMxoknuzyQDlI6Vi77tk6lTgdLoQoNJI7I+7KZy0AqtBNmNBsdlhpA2KcupwDKlEBVO9vqQxpuJqasBiW2iU+0XtVMmPGsH5HSrZ9fyOXVsLM44CB5NFb0vISTjbjpXGRwn6q7X3P2qTj7LYEURBK0LTCE46zBAVVV0ffzeVGpPBWIVxacR5U6BjCqONmvavo/jxeHZvT+ZTOarlyxQMi8oJPkjCAGsZQPcNAvGo5pzswnNsqHyUCBxl00IBAEzdDOV2o0BDxfZkBJRCfE5o9EYCZ71ZpNMsAqtC0JqdYQQR3xViMJCIbsI+NiqSZuOQzSr7jwkIYbFxbiScKqCkvb6hBAnWVLPZpi1ie0TdZc42cWlKHZVluH7UaTJoWeIFFFCcH3ayiwnGW3JF7Jb0/wsLSXn3E5khdTu8WmXzunWTxhETAh479HGpIqLxkt
gbXvsdMRR27BYr9OeonDSIspkMplEFiiZFxynRYp1jnXbcP34mNGFC1SzURQp5sRv4pP/IlYvduEc8XqsZXexj8ZSobVtbP+UGhUk7rNJJZchfwQfWzJGFFGRCKihoRMrMjE/hBSwFtIUT3xMiZyYR0LAD+0oBGVSS0UBp8yju7C3JIgkRMPr3eEjsXIRNzrH78qHoZUzLBfcSZ8UUZL8LyiEVFE6JVCGJNxBtES9oXeG2uHx3XslwSK7c9UECfQScGVBqxXHqyXdUD3J4iSTyTwLWaBkXpAMIsWHQNdblpsN18qC6cXLBBTttsWk/k6sQgzBbJy0QoBhOmUYEx7yTIblgaJAm+jdCAK99yfHpmpDJL6+Ji72Ez1MCZldn0kpFXUKQ0Xk1GX5dC8qeT70YDYdpoB2AiEt8DMaa+1dJlqlFEVRUKUclWFRoZd0vsQcl2EgORaIojrTWv/xcfNJ2CitMUkAqSGq3/uT0Ln0fK3UbgLJA1YCndHIdMSq3Zy0drI4yWQyfwxZoGResAzbjocAt6PVmqfqmpecv8Cib/FNxwRNoTTBh51xVJSO7YyhlaLSqPDwwkoTwjASmyZ5FAwCZ7gYD1UZFbf9ROMtmiDx4q8kSQAFRVHECP3enbRWdqi7fh72/Qx+lNiyORk3Hs6hquooEJIoGCaWhlHfuIE5Lh4cgtxOi4FdMQW1q4gI7DwwUbCpU9/TqYmheKLpfE7aTye+GpW+t7jAsVfAdEyrhW0Xxcngw8nhbJlM5tnIAiXzgmZoK1jr2OqOW4slo7Li0v6cZXsTHTylijkcUVCkv9V74ubiEKJX5LRAQMepncFbEcCJ5/RemWF6Rus0oZIu/kVVghUYBEZ6Xec9Ch1HbuML7PwtKvlFht0+IcT3NQpMkUaCU+UnSKzShBDoui7GxifzrU7ZLz4EQn861Y3kk4ktHY8gWp3sIRLSkkG/Eywy/L8a2mPx0KCSd2d43/SNqVN5Lad/9Qg9HlsX2MpwvFzQ9B0uhJTHksVJJpN5dnIOSuYFjXAiUnprWTdbrh0dcSc4+oM5ixCwEmd3UGBSsmpd1bFioOL2Xok6hCFcNgqUaC0JaYQ3SPSKhBD9LVqzCzeT1B7q+z5deANKC9qoZBAF58NuouV0ZqpA3JSs1UmLRWIjaIi3V4OgEaGqa7QxWGcZIkuifkiZJjJc/E9QSu8qMVono7AaDLjD8+KvJ1JM0nchu+8m7H4e5qEGiSFoY9LuIBPNsVrRibBFUNMJR23Dum3onUvG2Dy4k8lk/nhyBSXzgud0q6cd/CjGoM6dg3NzFouGfYHRsBtGQT0a0fYdElcFExcAnkz2SKp+nNhCh7FktbuoDm2QeEQcw9UmLS40itJUqMLQdR3es9v+e9JaOREIeNltHS7LMrV5HC75YeJJJhkSQmr9pIWAKRPlZDJpd8YxjyR9BO8DRkVBpSUg2qTNzCcnFODzDLJqGNlO3hfSxJLaGY7TNFMSPkOdySKsxdGNK1rXs9huaXt7Mq6c1Ukmk/kT+JIqKD/xEz+BUorv//7v393Xti2PPfYY58+fZzab8eY3v5nr16/f9bwnnniCN73pTUwmEy5dusQP/uAPxv8QZzJfJHFcOOCco+17FpsNt1Yr+umEzd6IlQr4FNjmQkjppakNo1UKc4sX15AMuLGqEjccG60xyqTMlJNNxbFFlDJRJJx4RkTH2Fifxn4ZWh8nM8Syi6IVJDhU8GjS8TqGtrFLmI3TSE4827alt3G53hBuNkwa3+0Nie9mTJq4GSZ2Ur1liKlPpxPj7VU0+eoU3DacgxIwKIzSlMZQaI1R8WeTYu5FgSeGvlkNa/G0dYmtK46abWzt+GHnThYnmUzmT+aLrqB87GMf41/8i3/Bq1/96rvu/4Ef+AF+5Vd+hV/6pV9if3+ft7/97XzHd3wHv/mbvwnEqYQ3velNXLlyhY985CM8/fTTfM/3fA9lWfLjP/7jX9qnyXzVIqkV44IHB6pVHBlNVRZcmE0hCHVjmQ3VFutR6aKaktIQUQgmVVWiYNHKUFUVCoXzMQtEB40LfmeAhWFqxeAloLwnGjZi1ULSBX5YRBgLD5oT40uaptGxMqGJ5+QgGWAljQkPVY6QBE4a501VDlEQfalpN47Sce8PCiSkjc8xq2S4DVWM03JB6yhAlIS7pnpU+pxamxSfcmoWKlVWAtHj0iFsDbi6YtG2rJ8hqnLxJJPJfCG+qArKer3mLW95C//yX/5Lzp07t7t/sVjwr/7Vv+Kf/JN/wjd90zfxmte8hl/4hV/gIx/5CP/jf/wPAP7rf/2vfPrTn+bf/tt/y9d//dfzrd/6rfzDf/gP+dmf/Vn6vv/yfKrMVyWSouedj/t6Vpst14+OuLZaclwX3KkN60KgLFBGx704POOmAKNRRu/GaJ33WO/iZmFjGI9GVEWRBnuSaZYYBqd3c8zJt5IW8IgMiwRTLL6KWSLRXKoZlE4IAWstwfnkP4ki6a4pnl17KBpivYTUXjEnE0b6JMtkCIETSbt5QpoSCh6fvCS7DwEpYfbUfp304G6gOhlcgwxjyrEag46m2DY4Ft5i64pl37HYbGi6Pkba58TYTCbzp+SLEiiPPfYYb3rTm3jDG95w1/0f//jHsdbedf8jjzzCgw8+yOOPPw7A448/zqte9SouX768O+aNb3wjy+WST33qU8/6fl3XsVwu77plMs/Gzo/iPF0fRcrtxYKjrmW7N2E1KmlKoas1XQF+6MQQfw0KRMcpFp/29nTe0nmLDR4rPk6yaL0ziJ5cyAV9elwZQBQhCQMlmpM1fTDIoiFpVVKLSdLEkDYFuqhQpkTpglPOEiAtJ9xVgKKoSs7duChQxXaVDSHmoBDPxQt4tVuXyLDIb+gJBaIfxg8VolRNCio9N4APKhlmA0HFm8NjlWcljr4u2ATP8XbDpuuwzuH9iajJZDKZL8RzbvH84i/+Ir/927/Nxz72sc977Nq1a1RVxcHBwV33X758mWvXru2OOS1OhseHx56N97znPbz73e9+rqea+SplCHFzwaOsZd3E1kldlJjJiLBtGXU2iQkh+UzvSncdJlei+AiAQpuYxtraPm0cJlZKUtEEicclr23KSImeDgBjFDjw3D2ie4LaZYrIYNaV6IHRpFHmNMmzi6NP1ZJY8eFk3De1fqLQIiXVpmC1YTpHxZbN0GpSgxhJn1/vfhy8KifLEtXOmBs/gUewElirQDcq2So42m7YdC39aXGSqyeZTOZPyXOqoDz55JP83b/7d/l3/+7fMRqN/qzO6fN417vexWKx2N2efPLJP7f3zrwwiftgYnsmjh833Fgcc6tp2IxHbKuSXnzcSDw8h7vlAmpobKSKSgiE4FFKnzKbPvNJ7DJDhmC3wQurjUFrg8Y84+CTlzhJhZWdmLj7yFNGW04ma4YiiEpTOrvR5OFzDcFtfL5IOP1ctXv9oUKiTnw6p6Z2htcNKk7rtMGzDI6lVqwkcGezSb4Tt/PhZHGSyWSeC89JoHz84x/nxo0b/IW/8BcoioKiKPjwhz/Mz/zMz1AUBZ
cvX6bve46Pj+963vXr17ly5QoAV65c+bypnuHn4ZhnUtc18/n8rlsm84WI2SVJpDjLumm5tV6zdBY3m9DUJZ06meLRaTIl7rNJF/pCgQG0MPhaRQVMZdCFuaufo1LGSEySlzhWLIIPghehdz1eSdyIrGIlYtcTGhYCpXbNifxRu88RktJR6J2fZLjwS/KYgI61D2VQGIKPWS7RUBtSxUjdJWAGtSGykzOkl0J0anlFhyyi9e5nrxXWKBoVWClhWxZ0xnDcNGy6LlZOQq6cZDKZL47nJFC++Zu/mU9+8pN84hOf2N1e+9rX8pa3vGX3+7Is+dCHPrR7zmc+8xmeeOIJrl69CsDVq1f55Cc/yY0bN3bHfPCDH2Q+n/Poo49+mT5WJnN35cA6T2st67blzmbDRgR9eEA/raNIKRTlqKSq693FW5+ahFFGUVQFuowBZNV4RD0Zo0xxUllIZlGlFaLZTbSElH/iVSBowZ+6BROzQ4wxSXgM4XDsWkw+mX8HARJTRwxIXAjofaC3LnV+dioqCZMkaoYqi46tJj1US0TuqhzdXUFKrSINQSvCzu+idr6dlsBSPBujaLRi2bY0fY/1d4uTLE8ymcxz5Tl5UPb29vjar/3au+6bTqecP39+d//b3vY23vnOd3J4eMh8Pucd///27j44qup+A/hz7r5lQ9wEE7MLShAtNSLRQahxRds/yBBpplplrGWi4svooPEnWoYitWptB2F02k7tVKxOq85opTLjK0WdGHwpGgOkRHnRiBUNg9lkKmYXTPbl3vv9/XHv3mQBhcSQ3A3PZ+bUsPdkc+5pYJ8597z83/8hGo3i/PPPBwDMnTsX06ZNw9VXX40HHngAsVgMv/71r9HQ0IBAIDBMt0Vk6T/52EAmA/Qq2CFCgxkKoeCEcVCqD+hNQ5km/EqDaLBW3sDaPVZpGnw+H0wju7rFtA/pAzSfzwoS2a3fNWU9ELKXPR/0MMV5JRuCPNlVPAKYumEtkx7Qdthb0WcfNvVvye9MlbWWIitx6ivYJx3bc1Y0zWPPo7F3kVX9k4Ctd0b/Zm/OGUNWmMnOPVHOT7RGcgwlSIugT0wkvR70mia+7k2jT88grRvWrrkcOSGi72DYd5L94x//CE3TMH/+fKRSKdTW1uLhhx92rns8Hqxbtw4333wzotEoxo0bh4ULF+K3v/3tcDeFCEB2FAXQYQAZBYUkxLS2pQ8VFqKsKAjxaDASfSg0FXyahuz+8dbGbIChG86EVE1ZH+4i1qRUeDwwTMNaPOPVIIa9x4iC9QhHFA4eQlAAVHbJrcdaYQNNgzLFCQP9s2OyIzTo35HNWfZr/6+9Fb91IKD1qEjTNFibwtj1B0yi1exVRdlTnrOhxVnMo7LhxGJk5+8q2OfrmOhVJnqVoE8EBzIZpHQdacOAzp1iiWgYKMnDf0USiQSKi4tHuxmUZ7ITSL2aBp/XC7/Ph2DAj5Jx41BaWIiijIlxB9Lw6yb8UFC6Yc1HgTVBtn/HWdgjJNaXRiYNw9Dh8Xrh8/mQ1g0k7TN5DFEQ0ezHM9ZyXEDgsVf7KFjzOqA81jwTwwDsDdkM07A3cLOexCoAppjQJDuV1XpUY527Y8Dn88Dr9dtb01ujNbpuQPP4YBoZKCXweDwQBWRME0l7LxQl1gRe094F1wphA/Y9gXUCtCgFQwFpCJIeQa8X6DUNHEhb4UQ3TOj2YYdm/v2zQkQjKB6PH3E+Kc/ioeOGs0eKvTrGFGvnWd0wkMpkML6wEJkiPwp7MwikdRR4NXjtOaweQzmjJg4FiG5t3a55rAmzGUOHaQeP7AF9zmOa7Ie+stqiKc1uFzBwZ5QB8277zwOyJ+1aG5iYTgiBUtBEg2EYMAwTSpnQNA2mYVg7zNrLjYH+EZHcB0/ZwNX/860a/a0w7Xk0OoBeMZHSBGmPhq91Hb32yEnGHjUyheGEiIYHAwodV7JzUsQwIWJtvW4Y1sZuGcNAZtw4lIYKkOlNwUwZKFQafKIgolvfMyBCWO9hjaOIwBlS8WgaxOtFxjSgrNP3nOnoAwOKNZqiBryHOI9WrJOMNQzcNtYwDCdIWDNKYF1XpjWZFf0rcaxda2EviTbtJcSH28RV7Im4yo4lypk/ozRrom3G3uMkoyl8rQRpD9CXyaA3k0Ha0KGb9inPfKxDRMOIAYWOO9kzcQyxNy4zxdkOPp3JoDcYRElBEKXBINIHUig0BD6P9agG2TkbsE8F9nqt7zUM+5RhgSEGsvu4WWGjf76IV/NDYEI30/YME3tpsf2n7LQPBXtFEKxVPNmN4ZwThJWyXzehYNhzR7JzSjzQPB578qz9MCg7IGLPX3GCT3ZjODuoiMpuNGfak2E1pDSFtALSGtBrmkhmDCR1HRl7vom17T0YTohoWDGg0HEpO9qgTECUdYCdtRxZRyqdwdfJJL4uHIdQgQ+FBhBMCjy6Aa8IvPbusN7sDq6AfbBe//tmd3y1wpA1B6V/3ogH0PwD2iLWB/zBa37sU4VhmtbEUwiU8liPegZsr68GvJNpH/CXHcWx9kvJjvoctJhY2XNglD33VmXn8woyAugC6B4gqQFJCJK6jqTRH0yM7Hk8DCdEdAwwoNBxLRsmRFkrTwzThG5Yk1x7kyn0BPw4IVCAkoAfAc0LT18GQQH8IvAD8Jr9621EU/YqmmwYsEYjnA9x+2cqe5mMMydEzMM9e+nfn0RZe6WIva2+IaZ94KB1srAVQuz5Mcra4h/2NWTDhx1/TNPeQM5e0SOmwNRMZERgQkGHQFdARilkNCCjTCRN6wDAtL0MeuDjHGvk5Zj/30RExyEGFCL07zoL+wRjwzBg2CMqyXQGXwf8KPIHMG6cHxlTwZvS4TUMBAD4lFhLjPvXAgNiLeWF/QFuIneEQlT/45xv/oDvPzNHnEcxcM76sc7KsVblWE+e7JEYMQFlwOv19j/CsUOSqQSmUjA1a86JoUz0GYIkBDoEhqaQ1hQyEOgiSBsmMqZpr9DpnwgrdsOZTYjoWGFAIbJlP3Qh1oe4GNYKHd00kTEM9KV1JHw+FHi9CHg1+DxA0FAImIBPgAKP9dhHMwVKxNrvVQOUJjAMa+dVsSfIQvU/0hmw3chh2yTZRzH2a6a99Nl5tqP66x7yfc7KHSuQiKZB14C0EmREkFQmejVBSgQZAIayTj/OGCZ0+xGONWJi2iEODCZENCIYUIgO4sxPsf9rPRoxkdZ1JDNpHNA0+DweBLw+FPp8CHo98JmAzzQREIEfJnxiwqc0eDz2VvmaQBnZkRRlb6dihwfThNIM5zge2I9vnPN6kJ2PYj8usgOJtefsgBENZ/6svWpIBBDDuStTAboGJMVEr2kgKSb6TMMOJ9aIiWE/5nIe40CcR1QMJkQ0khhQiL5BduKnYZrWCmJ7tY7SNKSUQlLLoM/rgd/jRYHXh6DXB79Xg8cQBEyFgBjwiWlNqtU0axTDnm5iCKBp9se9BjhxZeCEU2Vtjm+KOJu1wRRAs7abN524oNnvIdaBfso6iVg3dPuyZu3+qoCUCJJihZOkmEiZ1kiJng0m2bklOfNmO
M+EiEYeAwrRtxg418LaydV6fKMAGMpAxtDQB2tUxe/zIuD1IeDxwKcpeA3AKwYCEPhFgw8e6xGQCCAmNBH75Bz7fCB78zU1YFkxlAZlmjmrbCD2wX3Zzd+07DJiK5g4Jyjb8cIQhZQCkspECoKUmNa+Jvbjq+xhhDkH++Vs3EZENPIYUIiOUnZkI7tDqykCZdqboBlA2tCR1NLweqwg4rGXCfsA+GAgoAwUKA98UNCUQIPAA/tsYrGWLmuagib9m66JslcHZed/wAopJrIHD4ozeRYADBFrRARARkyYCsiIQp8YSNsbrumm/SjnoGXCctB7ERGNJgYUoiGQASMMKrv6J7vyxz7ZWLNPCPYoBY+yzgAKaIYVUGCNyGgQeAXwQcGjKXhNDzxQ8CA7wdVaFpzdXC67XNiwg0R2xMS0v9ZNga4EhgZ7LxNBWkykTcMaKZEBIyUDHuEwmBCR2zCgEH1HBz8GGnj2jQKgKwWlDGhKIan6R1YAsQ7qU1Yg0UwFTenwQMELwANrYixgTaQdGIrMAf81IBBRztemsrao143+s3EGzi+x3oOhhIjcjQGFaBjJwUHFnlMCWIFFh2mFE9W//4m9c72z06y1CMcKN9qANciqf7zD2QzO3v5kwIZsgBjZ/VD655QMnFvCUEJE+YABhegYcELAwEDApTBEREdNG+0GEBERER2MAYWIiIhchwGFiIiIXIcBhYiIiFyHAYWIiIhchwGFiIiIXIcBhYiIiFyHAYWIiIhchwGFiIiIXIcBhYiIiFyHAYWIiIhchwGFiIiIXGdQAeU3v/mNdYT8gFJZWelcTyaTaGhoQGlpKYqKijB//nx0dXXlvEdHRwfq6upQWFiI8vJyLF26FLquD8/dEBER0Zgw6NOMzzrrLLz++uv9b+Dtf4s77rgD//rXv7B27VoUFxfj1ltvxeWXX4533nkHAGAYBurq6hCJRPDuu++is7MT11xzDXw+H+6///5huB0iIiIaE2QQ7r33XjnnnHMOe62np0d8Pp+sXbvWee3DDz8UANLc3CwiIuvXrxdN0yQWizl1Vq9eLaFQSFKp1FG3Ix6PC6wT7VlYWFhYWFjyrMTj8SN+1g96DsquXbswceJEnHbaaaivr0dHRwcAoLW1FZlMBjU1NU7dyspKVFRUoLm5GQDQ3NyMqqoqhMNhp05tbS0SiQR27Ngx2KYQERHRGDWoRzzV1dV44okncMYZZ6CzsxP33XcfLrroImzfvh2xWAx+vx8lJSU53xMOhxGLxQAAsVgsJ5xkr2evfZNUKoVUKuX8OZFIDKbZRERElGcGFVDmzZvnfH322WejuroakydPxrPPPotgMDjsjctauXIl7rvvvmP2/kREROQu32mZcUlJCb7//e/jk08+QSQSQTqdRk9PT06drq4uRCIRAEAkEjlkVU/2z9k6h7N8+XLE43Gn7Nmz57s0m4iIiFzuOwWUAwcO4L///S8mTJiAmTNnwufzoampybne3t6Ojo4ORKNRAEA0GsW2bdvQ3d3t1GlsbEQoFMK0adO+8ecEAgGEQqGcQkRERGPYUS+dEZElS5bIm2++Kbt375Z33nlHampqpKysTLq7u0VEZNGiRVJRUSEbNmyQLVu2SDQalWg06ny/rusyffp0mTt3rrS1tcmrr74qJ510kixfvnwwzeAqHhYWFhYWljwuR7OKZ1AB5corr5QJEyaI3++Xk08+Wa688kr55JNPnOt9fX1yyy23yPjx46WwsFAuu+wy6ezszHmPzz77TObNmyfBYFDKyspkyZIlkslkBtMMBhQWFhYWFpY8LkcTUJSICPJMIpFAcXHxaDeDiIiIhiAejx9xugbP4iEiIiLXYUAhIiIi12FAISIiItdhQCEiIiLXYUAhIiIi12FAISIiItdhQCEiIiLXYUAhIiIi12FAISIiItdhQCEiIiLXYUAhIiIi12FAISIiItdhQCEiIiLXYUAhIiIi12FAISIiItdhQCEiIiLXYUAhIiIi12FAISIiItdhQCEiIiLXYUAhIiIi12FAISIiItdhQCEiIiLXYUAhIiIi12FAISIiItdhQCEiIiLXYUAhIiIi12FAISIiItdhQCEiIiLXGXRA2bt3L6666iqUlpYiGAyiqqoKW7Zsca6LCO655x5MmDABwWAQNTU12LVrV8577Nu3D/X19QiFQigpKcENN9yAAwcOfPe7ISIiojFhUAHlq6++wuzZs+Hz+fDKK69g586d+P3vf4/x48c7dR544AE89NBDeOSRR9DS0oJx48ahtrYWyWTSqVNfX48dO3agsbER69atw9tvv42bbrpp+O6KiIiI8psMwrJly+TCCy/8xuumaUokEpEHH3zQea2np0cCgYA888wzIiKyc+dOASCbN2926rzyyiuilJK9e/ceVTvi8bgAYGFhYWFhYcnDEo/Hj/hZP6gRlJdeegmzZs3CFVdcgfLycsyYMQOPPfaYc3337t2IxWKoqalxXisuLkZ1dTWam5sBAM3NzSgpKcGsWbOcOjU1NdA0DS0tLYf9ualUColEIqcQERHR2DWogPLpp59i9erVmDp1Kl577TXcfPPNuO222/Dkk08CAGKxGAAgHA7nfF84HHauxWIxlJeX51z3er048cQTnToHW7lyJYqLi50yadKkwTSbiIiI8sygAoppmjj33HNx//33Y8aMGbjppptw44034pFHHjlW7QMALF++HPF43Cl79uw5pj+PiIiIRtegAsqECRMwbdq0nNfOPPNMdHR0AAAikQgAoKurK6dOV1eXcy0SiaC7uzvnuq7r2Ldvn1PnYIFAAKFQKKcQERHR2DWogDJ79my0t7fnvPbxxx9j8uTJAIApU6YgEomgqanJuZ5IJNDS0oJoNAoAiEaj6OnpQWtrq1Nnw4YNME0T1dXVQ74RIiIiGkOOatmMbdOmTeL1emXFihWya9cuefrpp6WwsFCeeuopp86qVaukpKREXnzxRfnggw/k0ksvlSlTpkhfX59T5+KLL5YZM2ZIS0uLbNy4UaZOnSoLFiw46nZwFQ8LCwsLC0v+lqNZxTOogCIi8vLLL8v06dMlEAhIZWWlPProoznXTdOUu+++W8LhsAQCAZkzZ460t7fn1Pnyyy9lwYIFUlRUJKFQSK677jrZv3//UbeBAYWFhYWFhSV/y9EEFCUigjyTSCRQXFw82s0gIiKiIYjH40ecT8qzeIiIiMh1GFCIiIjIdRhQiIiIyHUYUIiIiMh1GFCIiIjIdRhQiIiIyHUYUIiIiMh1GFCIiIjIdRhQiIiIyHUYUIiIiMh1GFCIiIjIdRhQiIiIyHUYUIiIiMh1GFCIiIjIdRhQiIiIyHUYUIiIiMh1GFCIiIjIdRhQiIiIyHUYUIiIiMh1GFCIiIjIdRhQiIiIyHUYUIiIiMh1GFCIiIjIdRhQiIiIyHUYUIiIiMh1GFCIiIjIdRhQiIiIyHUYUIiIiMh1BhVQTj31VCilDikNDQ0AgGQyiYaGBpSWlqKoqAjz589HV1dXznt0dHSgrq4OhYWFKC8vx9KlS6Hr+vDdEREREeW9
QQWUzZs3o7Oz0ymNjY0AgCuuuAIAcMcdd+Dll1/G2rVr8dZbb+GLL77A5Zdf7ny/YRioq6tDOp3Gu+++iyeffBJPPPEE7rnnnmG8JSIiIsp78h0sXrxYTj/9dDFNU3p6esTn88natWud6x9++KEAkObmZhERWb9+vWiaJrFYzKmzevVqCYVCkkqljvrnxuNxAcDCwsLCwsKShyUejx/xs37Ic1DS6TSeeuopXH/99VBKobW1FZlMBjU1NU6dyspKVFRUoLm5GQDQ3NyMqqoqhMNhp05tbS0SiQR27NjxjT8rlUohkUjkFCIiIhq7hhxQXnjhBfT09ODaa68FAMRiMfj9fpSUlOTUC4fDiMViTp2B4SR7PXvtm6xcuRLFxcVOmTRp0lCbTURERHlgyAHlb3/7G+bNm4eJEycOZ3sOa/ny5YjH407Zs2fPMf+ZRERENHq8Q/mmzz//HK+//jqee+4557VIJIJ0Oo2enp6cUZSuri5EIhGnzqZNm3LeK7vKJ1vncAKBAAKBwFCaSkRERHloSCMojz/+OMrLy1FXV+e8NnPmTPh8PjQ1NTmvtbe3o6OjA9FoFAAQjUaxbds2dHd3O3UaGxsRCoUwbdq0od4DERERjTWDWLQjIiKGYUhFRYUsW7bskGuLFi2SiooK2bBhg2zZskWi0ahEo1Hnuq7rMn36dJk7d660tbXJq6++KieddJIsX758UG3gKh4WFhYWFpb8LUezimfQAeW1114TANLe3n7Itb6+Prnllltk/PjxUlhYKJdddpl0dnbm1Pnss89k3rx5EgwGpaysTJYsWSKZTGZQbWBAYWFhYWFhyd9yNAFFiYggz8Tj8UNWCxEREVF+6OnpQXFx8bfWycuzeL788svRbgIREREN0f79+49YZ0ireEbbiSeeCMA61+dICYz6JRIJTJo0CXv27EEoFBrt5uQF9tngsc+Ghv02eOyzoRnNfhMR7N+//6i2KMnLgKJp1sBPcXExfymHIBQKsd8GiX02eOyzoWG/DR77bGhGq9+OdmAhLx/xEBER0djGgEJERESuk5cBJRAI4N577+XusoPEfhs89tngsc+Ghv02eOyzocmXfsvLZcZEREQ0tuXlCAoRERGNbQwoRERE5DoMKEREROQ6DChERETkOnkZUP7yl7/g1FNPRUFBAaqrq7Fp06bRbtKoWblyJX7wgx/ghBNOQHl5OX7605+ivb09p04ymURDQwNKS0tRVFSE+fPno6urK6dOR0cH6urqUFhYiPLycixduhS6ro/krYyaVatWQSmF22+/3XmNfXaovXv34qqrrkJpaSmCwSCqqqqwZcsW57qI4J577sGECRMQDAZRU1ODXbt25bzHvn37UF9fj1AohJKSEtxwww04cODASN/KiDEMA3fffTemTJmCYDCI008/Hb/73e8wcG3C8d5vb7/9Nn7yk59g4sSJUErhhRdeyLk+XP3zwQcf4KKLLkJBQQEmTZqEBx544Fjf2jH1bf2WyWSwbNkyVFVVYdy4cZg4cSKuueYafPHFFznv4fp+G9Qxwi6wZs0a8fv98ve//1127NghN954o5SUlEhXV9doN21U1NbWyuOPPy7bt2+XtrY2+fGPfywVFRVy4MABp86iRYtk0qRJ0tTUJFu2bJHzzz9fLrjgAue6rusyffp0qampka1bt8r69eulrKxMli9fPhq3NKI2bdokp556qpx99tmyePFi53X2Wa59+/bJ5MmT5dprr5WWlhb59NNP5bXXXpNPPvnEqbNq1SopLi6WF154Qd5//3255JJLZMqUKdLX1+fUufjii+Wcc86R9957T/7973/L9773PVmwYMFo3NKIWLFihZSWlsq6detk9+7dsnbtWikqKpI//elPTp3jvd/Wr18vd911lzz33HMCQJ5//vmc68PRP/F4XMLhsNTX18v27dvlmWeekWAwKH/9619H6jaH3bf1W09Pj9TU1Mg///lP+eijj6S5uVnOO+88mTlzZs57uL3f8i6gnHfeedLQ0OD82TAMmThxoqxcuXIUW+Ue3d3dAkDeeustEbF+UX0+n6xdu9ap8+GHHwoAaW5uFhHrF13TNInFYk6d1atXSygUklQqNbI3MIL2798vU6dOlcbGRvnRj37kBBT22aGWLVsmF1544TdeN01TIpGIPPjgg85rPT09EggE5JlnnhERkZ07dwoA2bx5s1PnlVdeEaWU7N2799g1fhTV1dXJ9ddfn/Pa5ZdfLvX19SLCfjvYwR+0w9U/Dz/8sIwfPz7n7+ayZcvkjDPOOMZ3NDIOF+wOtmnTJgEgn3/+uYjkR7/l1SOedDqN1tZW1NTUOK9pmoaamho0NzePYsvcIx6PA+g/ULG1tRWZTCanzyorK1FRUeH0WXNzM6qqqhAOh506tbW1SCQS2LFjxwi2fmQ1NDSgrq4up28A9tnhvPTSS5g1axauuOIKlJeXY8aMGXjsscec67t370YsFsvps+LiYlRXV+f0WUlJCWbNmuXUqampgaZpaGlpGbmbGUEXXHABmpqa8PHHHwMA3n//fWzcuBHz5s0DwH47kuHqn+bmZvzwhz+E3+936tTW1qK9vR1fffXVCN3N6IrH41BKoaSkBEB+9FteHRb4v//9D4Zh5HwoAEA4HMZHH300Sq1yD9M0cfvtt2P27NmYPn06ACAWi8Hv9zu/lFnhcBixWMypc7g+zV4bi9asWYP//Oc/2Lx58yHX2GeH+vTTT7F69Wr84he/wK9+9Sts3rwZt912G/x+PxYuXOjc8+H6ZGCflZeX51z3er048cQTx2SfAcCdd96JRCKByspKeDweGIaBFStWoL6+HgDYb0cwXP0Ti8UwZcqUQ94je238+PHHpP1ukUwmsWzZMixYsMA5HDAf+i2vAgp9u4aGBmzfvh0bN24c7aa42p49e7B48WI0NjaioKBgtJuTF0zTxKxZs3D//fcDAGbMmIHt27fjkUcewcKFC0e5de717LPP4umnn8Y//vEPnHXWWWhra8Ptt9+OiRMnst9oRGQyGfzsZz+DiGD16tWj3ZxByatHPGVlZfB4PIespujq6kIkEhmlVrnDrbfeinXr1uGNN97AKaec4rweiUSQTqfR09OTU39gn0UikcP2afbaWNPa2oru7m6ce+658Hq98Hq9eOutt/DQQw/B6/UiHA6zzw4yYcIETJs2Lee1M888Ex0dHQD67/nb/m5GIhF0d3fnXNd1Hfv27RuTfQYAS5cuxZ133omf//znqKqqwtVXX4077rgDK1euBMB+O5Lh6p/j7e9rVjacfP7552hsbHRGT4D86Le8Cih+vx8zZ85EU1OT85ppmmhqakI0Gh3Flo0eEcGtt96K559/Hhs2bDhkOG7mzJnw+Xw5fdbe3o6Ojg6nz6LRKLZt25bzy5r9ZT74Q2ksmDNnDrZt24a2tjanzJo1C/X19c7X7LNcs2fPPmT5+scff4zJkycDAKZMmYJIJJLTZ4lEAi0tLTl91tPTg9bWVqfOhg0bYJomqqurR+AuRl5vby80LfefWY/HA9M0AbDfjmS4+ic
ajeLtt99GJpNx6jQ2NuKMM84Ys493suFk165deP3111FaWppzPS/6bUSm4g6jNWvWSCAQkCeeeEJ27twpN910k5SUlOSspjie3HzzzVJcXCxvvvmmdHZ2OqW3t9eps2jRIqmoqJANGzbIli1bJBqNSjQada5nl8zOnTtX2tra5NVXX5WTTjppzC6ZPZyBq3hE2GcH27Rpk3i9XlmxYoXs2rVLnn76aSksLJSnnnrKqbNq1SopKSmRF198UT744AO59NJLD7scdMaMGdLS0iIbN26UqVOnjpnlsoezcOFCOfnkk51lxs8995yUlZXJL3/5S6fO8d5v+/fvl61bt8rWrVsFgPzhD3+QrVu3OqtNhqN/enp6JBwOy9VXXy3bt2+XNWvWSGFhYV4vM/62fkun03LJJZfIKaecIm1tbTmfDQNX5Li93/IuoIiI/PnPf5aKigrx+/1y3nnnyXvvvTfaTRo1AA5bHn/8cadOX1+f3HLLLTJ+/HgpLCyUyy67TDo7O3Pe57PPPpN58+ZJMBiUsrIyWbJkiWQymRG+m9FzcEBhnx3q5ZdflunTp0sgEJDKykp59NFHc66bpil33323hMNhCQQCMmfOHGlvb8+p8+WXX8qCBQukqKhIQqGQXHfddbJ///6RvI0RlUgkZPHixVJRUSEFBQVy2mmnyV133ZXzIXG899sbb7xx2H/DFi5cKCLD1z/vv/++XHjhhRIIBOTkk0+WVatWjdQtHhPf1m+7d+/+xs+GN954w3kPt/ebEhmwpSERERGRC+TVHBQiIiI6PjCgEBERkeswoBAREZHrMKAQERGR6zCgEBERkeswoBAREZHrMKAQERGR6zCgEBERkeswoBAREZHrMKAQERGR6zCgEBERkeswoBAREZHr/D8Mvs7zbf+OwQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plt.imshow((segm_mask[...,None]/255.*video[0,0].permute(1,2,0).cpu().numpy()/255.))" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "b42dce24-7952-4660-8298-4c362d6913cf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Video saved to ./videos/segm_grid_pred_track.mp4\n" + ] + } + ], + "source": [ + "pred_tracks, __ = model(video, grid_size=grid_size, segm_mask=torch.from_numpy(segm_mask)[None, None])\n", + "vis = Visualizer(\n", + " save_dir='./videos',\n", + " pad_value=100,\n", + " linewidth=2,\n", + ")\n", + "vis.visualize(\n", + " video=video,\n", + " tracks=pred_tracks, \n", + " filename='segm_grid');" + ] + }, + { + "cell_type": "markdown", + "id": "5a386308-0d20-4ba3-bbb9-98ea79823a47", + "metadata": {}, + "source": [ + "We are now only tracking points on the object (and around):" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "1810440f-00f4-488a-a174-36be05949e42", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "HTML(\"\"\"\"\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "a63e89e4-8890-4e1b-91ec-d5dfa3f93309", + "metadata": {}, + "source": [ + "## Dense Tracks" + ] + }, + { + "cell_type": "markdown", + "id": "4ae764d8-db7c-41c2-a712-1876e7b4372d", + "metadata": {}, + "source": [ + "### Tracking forward **and backward** from the frame number x" + ] + }, + { + "cell_type": "markdown", + "id": "0dde3237-ecad-4c9b-b100-28b1f1b3cbe6", + "metadata": {}, + "source": [ + "CoTracker also has a mode to track **every pixel** in a video in a **dense** manner but it is much slower than in previous examples. Let's downsample the video in order to make it faster: " + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "379557d9-80ea-4316-91df-4da215193b41", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([1, 48, 3, 719, 1282])" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "video.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "c6db5cc7-351d-4d9e-9b9d-3a40f05b077a", + "metadata": {}, + "outputs": [], + "source": [ + "import torch.nn.functional as F\n", + "video_interp = F.interpolate(video[0], [100,180], mode=\"bilinear\")[None].cuda()" + ] + }, + { + "cell_type": "markdown", + "id": "7ba32cb3-97dc-46f5-b2bd-b93a094dc819", + "metadata": {}, + "source": [ + "The video now has a much lower resolution:" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "0918f246-5556-43b8-9f6d-88013d5a487e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([1, 48, 3, 100, 180])" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "video_interp.shape" + ] + }, + { + "cell_type": "markdown", + "id": "bc7d3a2c-5e87-4c8d-ad10-1f9c6d2ffbed", + "metadata": {}, + "source": [ + "Again, let's track points in both directions. 
This will only take a couple of minutes:" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "3b852606-5229-4abd-b166-496d35da1009", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 9/9 [02:07<00:00, 14.18s/it]\n" + ] + } + ], + "source": [ + "pred_tracks, __ = model(video_interp, grid_query_frame=20, backward_tracking=True)\n" + ] + }, + { + "cell_type": "markdown", + "id": "4143ab14-810e-4e65-93f1-5775957cf4da", + "metadata": {}, + "source": [ + "Visualization with an optical flow color encoding:" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "5394b0ba-1fc7-4843-91d5-6113a6e86bdf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Video saved to ./videos/dense_pred_track.mp4\n" + ] + } + ], + "source": [ + "vis = Visualizer(\n", + " save_dir='./videos',\n", + " pad_value=20,\n", + " linewidth=1,\n", + " mode='optical_flow'\n", + ")\n", + "vis.visualize(\n", + " video=video_interp,\n", + " tracks=pred_tracks, \n", + " query_frame=grid_query_frame,\n", + " filename='dense');" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "9113c2ac-4d25-4ef2-8951-71a1c1be74dd", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "HTML(\"\"\"\"\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "95e9bce0-382b-4d18-9316-7f92093ada1d", + "metadata": {}, + "source": [ + "That's all, now you can use CoTracker in your projects!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "54e0ba0c-b532-46a9-af6f-9508de689dd2", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "stereoformer", + "language": "python", + "name": "stereoformer" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..ad6dc97 --- /dev/null +++ b/setup.py @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from setuptools import find_packages, setup + +setup( + name="cotracker", + version="1.0", + install_requires=[], + packages=find_packages(exclude="notebooks"), + extras_require={ + "all": ["matplotlib", "opencv-python"], + "dev": ["flake8", "black"], + }, +) diff --git a/train.py b/train.py new file mode 100644 index 0000000..ebb7d08 --- /dev/null +++ b/train.py @@ -0,0 +1,665 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
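The notebook above closes by noting that CoTracker is now ready to be used in other projects. As a recap of the options demonstrated in the demo before the training script below, here is a small, hypothetical helper (not taken from the repository) that bundles them into one call. It assumes `model` is the CoTracker predictor built at the top of the notebook, that `video` is a `[1, T, 3, H, W]` float tensor on the same device, and that the keyword arguments shown in the individual demo cells can be combined freely; the default values simply mirror the first demo cell.

```python
import torch

def track_video(model, video, grid_size=30, grid_query_frame=0,
                backward_tracking=False, segm_mask=None):
    """Run CoTracker with the options shown in the demo notebook above.

    segm_mask: optional [H, W] uint8 array whose non-zero pixels mark the object;
    it is reshaped to [1, 1, H, W] exactly as in the masked-grid cell.
    """
    kwargs = dict(
        grid_size=grid_size,
        grid_query_frame=grid_query_frame,
        backward_tracking=backward_tracking,
    )
    if segm_mask is not None:
        kwargs["segm_mask"] = torch.from_numpy(segm_mask)[None, None]
    pred_tracks, extra = model(video, **kwargs)  # the demo discards the second output
    return pred_tracks, extra

# Example mirroring the sparse demo: a 30x30 grid queried on frame 20, tracked in both directions.
pred_tracks, _ = track_video(model, video, grid_size=30,
                             grid_query_frame=20, backward_tracking=True)
```

The returned tracks can then be passed to the `Visualizer` exactly as in the cells above.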
+ +import os +import random +import torch +import signal +import socket +import sys +import json + +import numpy as np +import argparse +import logging +from pathlib import Path +from tqdm import tqdm +import torch.optim as optim +from torch.utils.data import DataLoader +from torch.cuda.amp import GradScaler + +from torch.utils.tensorboard import SummaryWriter +from pytorch_lightning.lite import LightningLite + +from cotracker.models.evaluation_predictor import EvaluationPredictor +from cotracker.models.core.cotracker.cotracker import CoTracker +from cotracker.utils.visualizer import Visualizer +from cotracker.datasets.tap_vid_datasets import TapVidDataset +from cotracker.datasets.badja_dataset import BadjaDataset +from cotracker.datasets.fast_capture_dataset import FastCaptureDataset +from cotracker.evaluation.core.evaluator import Evaluator +from cotracker.datasets import kubric_movif_dataset +from cotracker.datasets.utils import collate_fn, collate_fn_train, dataclass_to_cuda_ +from cotracker.models.core.cotracker.losses import sequence_loss, balanced_ce_loss + + +# define the handler function +# for training on a slurm cluster +def sig_handler(signum, frame): + print("caught signal", signum) + print(socket.gethostname(), "USR1 signal caught.") + # do other stuff to cleanup here + print("requeuing job " + os.environ["SLURM_JOB_ID"]) + os.system("scontrol requeue " + os.environ["SLURM_JOB_ID"]) + sys.exit(-1) + + +def term_handler(signum, frame): + print("bypassing sigterm", flush=True) + + +def fetch_optimizer(args, model): + """Create the optimizer and learning rate scheduler""" + optimizer = optim.AdamW( + model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=1e-8 + ) + scheduler = optim.lr_scheduler.OneCycleLR( + optimizer, + args.lr, + args.num_steps + 100, + pct_start=0.05, + cycle_momentum=False, + anneal_strategy="linear", + ) + + return optimizer, scheduler + + +def forward_batch(batch, model, args, loss_fn=None, writer=None, step=0): + rgbs = batch.video + trajs_g = batch.trajectory + vis_g = batch.visibility + valids = batch.valid + B, T, C, H, W = rgbs.shape + assert C == 3 + B, T, N, D = trajs_g.shape + device = rgbs.device + + __, first_positive_inds = torch.max(vis_g, dim=1) + # We want to make sure that during training the model sees visible points + # that it does not need to track just yet: they are visible but queried from a later frame + N_rand = N // 4 + # inds of visible points in the 1st frame + nonzero_inds = [torch.nonzero(vis_g[0, :, i]) for i in range(N)] + rand_vis_inds = torch.cat( + [ + nonzero_row[torch.randint(len(nonzero_row), size=(1,))] + for nonzero_row in nonzero_inds + ], + dim=1, + ) + first_positive_inds = torch.cat( + [rand_vis_inds[:, :N_rand], first_positive_inds[:, N_rand:]], dim=1 + ) + ind_array_ = torch.arange(T, device=device) + ind_array_ = ind_array_[None, :, None].repeat(B, 1, N) + assert torch.allclose( + vis_g[ind_array_ == first_positive_inds[:, None, :]], + torch.ones_like(vis_g), + ) + assert torch.allclose( + vis_g[ind_array_ == rand_vis_inds[:, None, :]], torch.ones_like(vis_g) + ) + + gather = torch.gather( + trajs_g, 1, first_positive_inds[:, :, None, None].repeat(1, 1, N, 2) + ) + xys = torch.diagonal(gather, dim1=1, dim2=2).permute(0, 2, 1) + + queries = torch.cat([first_positive_inds[:, :, None], xys], dim=2) + + predictions, __, visibility, train_data = model( + rgbs=rgbs, queries=queries, iters=args.train_iters, is_train=True + ) + vis_predictions, coord_predictions, wind_inds, sort_inds = train_data + + trajs_g = 
trajs_g[:, :, sort_inds] + vis_g = vis_g[:, :, sort_inds] + valids = valids[:, :, sort_inds] + + vis_gts = [] + traj_gts = [] + valids_gts = [] + + for i, wind_idx in enumerate(wind_inds): + ind = i * (args.sliding_window_len // 2) + + vis_gts.append(vis_g[:, ind : ind + args.sliding_window_len, :wind_idx]) + traj_gts.append(trajs_g[:, ind : ind + args.sliding_window_len, :wind_idx]) + valids_gts.append(valids[:, ind : ind + args.sliding_window_len, :wind_idx]) + + seq_loss = sequence_loss(coord_predictions, traj_gts, vis_gts, valids_gts, 0.8) + vis_loss = balanced_ce_loss(vis_predictions, vis_gts, valids_gts) + + output = {"flow": {"predictions": predictions[0].detach()}} + output["flow"]["loss"] = seq_loss.mean() + output["visibility"] = { + "loss": vis_loss.mean() * 10.0, + "predictions": visibility[0].detach(), + } + return output + + +def run_test_eval(evaluator, model, dataloaders, writer, step): + model.eval() + for ds_name, dataloader in dataloaders: + predictor = EvaluationPredictor( + model.module.module, + grid_size=6, + local_grid_size=0, + single_point=False, + n_iters=6, + ) + + metrics = evaluator.evaluate_sequence( + model=predictor, + test_dataloader=dataloader, + dataset_name=ds_name, + train_mode=True, + writer=writer, + step=step, + ) + + if ds_name == "badja" or ds_name == "fastcapture" or ("kubric" in ds_name): + + metrics = { + **{ + f"{ds_name}_avg": np.mean( + [v for k, v in metrics.items() if "accuracy" not in k] + ) + }, + **{ + f"{ds_name}_avg_accuracy": np.mean( + [v for k, v in metrics.items() if "accuracy" in k] + ) + }, + } + print("avg", np.mean([v for v in metrics.values()])) + + if "tapvid" in ds_name: + metrics = { + f"{ds_name}_avg_OA": metrics["avg"]["occlusion_accuracy"] * 100, + f"{ds_name}_avg_delta": metrics["avg"]["average_pts_within_thresh"] + * 100, + f"{ds_name}_avg_Jaccard": metrics["avg"]["average_jaccard"] * 100, + } + + writer.add_scalars(f"Eval", metrics, step) + + +class Logger: + + SUM_FREQ = 100 + + def __init__(self, model, scheduler): + self.model = model + self.scheduler = scheduler + self.total_steps = 0 + self.running_loss = {} + self.writer = SummaryWriter(log_dir=os.path.join(args.ckpt_path, "runs")) + + def _print_training_status(self): + metrics_data = [ + self.running_loss[k] / Logger.SUM_FREQ + for k in sorted(self.running_loss.keys()) + ] + training_str = "[{:6d}] ".format(self.total_steps + 1) + metrics_str = ("{:10.4f}, " * len(metrics_data)).format(*metrics_data) + + # print the training status + logging.info( + f"Training Metrics ({self.total_steps}): {training_str + metrics_str}" + ) + + if self.writer is None: + self.writer = SummaryWriter(log_dir=os.path.join(args.ckpt_path, "runs")) + + for k in self.running_loss: + self.writer.add_scalar( + k, self.running_loss[k] / Logger.SUM_FREQ, self.total_steps + ) + self.running_loss[k] = 0.0 + + def push(self, metrics, task): + self.total_steps += 1 + + for key in metrics: + task_key = str(key) + "_" + task + if task_key not in self.running_loss: + self.running_loss[task_key] = 0.0 + + self.running_loss[task_key] += metrics[key] + + if self.total_steps % Logger.SUM_FREQ == Logger.SUM_FREQ - 1: + self._print_training_status() + self.running_loss = {} + + def write_dict(self, results): + if self.writer is None: + self.writer = SummaryWriter(log_dir=os.path.join(args.ckpt_path, "runs")) + + for key in results: + self.writer.add_scalar(key, results[key], self.total_steps) + + def close(self): + self.writer.close() + + +class Lite(LightningLite): + def run(self, args): + def 
seed_everything(seed: int): + random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + seed_everything(0) + + def seed_worker(worker_id): + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + g = torch.Generator() + g.manual_seed(0) + + eval_dataloaders = [] + if "badja" in args.eval_datasets: + eval_dataset = BadjaDataset( + data_root=os.path.join(args.dataset_root, "BADJA"), + max_seq_len=args.eval_max_seq_len, + dataset_resolution=args.crop_size, + ) + eval_dataloader_badja = torch.utils.data.DataLoader( + eval_dataset, + batch_size=1, + shuffle=False, + num_workers=8, + collate_fn=collate_fn, + ) + eval_dataloaders.append(("badja", eval_dataloader_badja)) + + if "fastcapture" in args.eval_datasets: + eval_dataset = FastCaptureDataset( + data_root=os.path.join(args.dataset_root, "fastcapture"), + max_seq_len=min(100, args.eval_max_seq_len), + max_num_points=40, + dataset_resolution=args.crop_size, + ) + eval_dataloader_fastcapture = torch.utils.data.DataLoader( + eval_dataset, + batch_size=1, + shuffle=False, + num_workers=1, + collate_fn=collate_fn, + ) + eval_dataloaders.append(("fastcapture", eval_dataloader_fastcapture)) + + if "tapvid_davis_first" in args.eval_datasets: + data_root = os.path.join( + args.dataset_root, "/tapvid_davis/tapvid_davis.pkl" + ) + eval_dataset = TapVidDataset(dataset_type="davis", data_root=data_root) + eval_dataloader_tapvid_davis = torch.utils.data.DataLoader( + eval_dataset, + batch_size=1, + shuffle=False, + num_workers=1, + collate_fn=collate_fn, + ) + eval_dataloaders.append(("tapvid_davis", eval_dataloader_tapvid_davis)) + + evaluator = Evaluator(args.ckpt_path) + + visualizer = Visualizer( + save_dir=args.ckpt_path, + pad_value=80, + fps=1, + show_first_frame=0, + tracks_leave_trace=0, + ) + + loss_fn = None + + if args.model_name == "cotracker": + + model = CoTracker( + stride=args.model_stride, + S=args.sliding_window_len, + add_space_attn=not args.remove_space_attn, + num_heads=args.updateformer_num_heads, + hidden_size=args.updateformer_hidden_size, + space_depth=args.updateformer_space_depth, + time_depth=args.updateformer_time_depth, + ) + else: + raise ValueError(f"Model {args.model_name} doesn't exist") + + with open(args.ckpt_path + "/meta.json", "w") as file: + json.dump(vars(args), file, sort_keys=True, indent=4) + + model.cuda() + + train_dataset = kubric_movif_dataset.KubricMovifDataset( + data_root=os.path.join(args.dataset_root, "kubric_movi_f"), + crop_size=args.crop_size, + seq_len=args.sequence_len, + traj_per_sample=args.traj_per_sample, + sample_vis_1st_frame=args.sample_vis_1st_frame, + use_augs=not args.dont_use_augs, + ) + + train_loader = DataLoader( + train_dataset, + batch_size=args.batch_size, + shuffle=True, + num_workers=args.num_workers, + worker_init_fn=seed_worker, + generator=g, + pin_memory=True, + collate_fn=collate_fn_train, + drop_last=True, + ) + + train_loader = self.setup_dataloaders(train_loader, move_to_device=False) + print("LEN TRAIN LOADER", len(train_loader)) + optimizer, scheduler = fetch_optimizer(args, model) + + total_steps = 0 + logger = Logger(model, scheduler) + + folder_ckpts = [ + f + for f in os.listdir(args.ckpt_path) + if not os.path.isdir(f) and f.endswith(".pth") and not "final" in f + ] + if len(folder_ckpts) > 0: + ckpt_path = sorted(folder_ckpts)[-1] + ckpt = 
self.load(os.path.join(args.ckpt_path, ckpt_path)) + logging.info(f"Loading checkpoint {ckpt_path}") + if "model" in ckpt: + model.load_state_dict(ckpt["model"]) + else: + model.load_state_dict(ckpt) + if "optimizer" in ckpt: + logging.info("Load optimizer") + optimizer.load_state_dict(ckpt["optimizer"]) + if "scheduler" in ckpt: + logging.info("Load scheduler") + scheduler.load_state_dict(ckpt["scheduler"]) + if "total_steps" in ckpt: + total_steps = ckpt["total_steps"] + logging.info(f"Load total_steps {total_steps}") + + elif args.restore_ckpt is not None: + assert args.restore_ckpt.endswith(".pth") or args.restore_ckpt.endswith( + ".pt" + ) + logging.info("Loading checkpoint...") + + strict = True + state_dict = self.load(args.restore_ckpt) + if "model" in state_dict: + state_dict = state_dict["model"] + + if list(state_dict.keys())[0].startswith("module."): + state_dict = { + k.replace("module.", ""): v for k, v in state_dict.items() + } + model.load_state_dict(state_dict, strict=strict) + + logging.info(f"Done loading checkpoint") + model, optimizer = self.setup(model, optimizer, move_to_device=False) + # model.cuda() + model.train() + + save_freq = args.save_freq + scaler = GradScaler(enabled=args.mixed_precision) + + should_keep_training = True + global_batch_num = 0 + epoch = -1 + + while should_keep_training: + epoch += 1 + for i_batch, batch in enumerate(tqdm(train_loader)): + batch, gotit = batch + if not all(gotit): + print("batch is None") + continue + dataclass_to_cuda_(batch) + + optimizer.zero_grad() + + assert model.training + + output = forward_batch( + batch, + model, + args, + loss_fn=loss_fn, + writer=logger.writer, + step=total_steps, + ) + + loss = 0 + for k, v in output.items(): + if "loss" in v: + loss += v["loss"] + logger.writer.add_scalar( + f"live_{k}_loss", v["loss"].item(), total_steps + ) + if "metrics" in v: + logger.push(v["metrics"], k) + + if self.global_rank == 0: + if total_steps % save_freq == save_freq - 1: + if args.model_name == "motion_diffuser": + pred_coords = model.module.module.forward_batch_test( + batch, interp_shape=args.crop_size + ) + + output["flow"] = {"predictions": pred_coords[0].detach()} + visualizer.visualize( + video=batch.video.clone(), + tracks=batch.trajectory.clone(), + filename="train_gt_traj", + writer=logger.writer, + step=total_steps, + ) + + visualizer.visualize( + video=batch.video.clone(), + tracks=output["flow"]["predictions"][None], + filename="train_pred_traj", + writer=logger.writer, + step=total_steps, + ) + + if len(output) > 1: + logger.writer.add_scalar( + f"live_total_loss", loss.item(), total_steps + ) + logger.writer.add_scalar( + f"learning_rate", optimizer.param_groups[0]["lr"], total_steps + ) + global_batch_num += 1 + + self.barrier() + + self.backward(scaler.scale(loss)) + + scaler.unscale_(optimizer) + torch.nn.utils.clip_grad_norm_(model.parameters(), 10.0) + + scaler.step(optimizer) + scheduler.step() + scaler.update() + total_steps += 1 + if self.global_rank == 0: + if (i_batch >= len(train_loader) - 1) or ( + total_steps == 1 and args.validate_at_start + ): + if (epoch + 1) % args.save_every_n_epoch == 0: + ckpt_iter = "0" * (6 - len(str(total_steps))) + str( + total_steps + ) + save_path = Path( + f"{args.ckpt_path}/model_{args.model_name}_{ckpt_iter}.pth" + ) + + save_dict = { + "model": model.module.module.state_dict(), + "optimizer": optimizer.state_dict(), + "scheduler": scheduler.state_dict(), + "total_steps": total_steps, + } + + logging.info(f"Saving file {save_path}") + 
self.save(save_dict, save_path) + + if (epoch + 1) % args.evaluate_every_n_epoch == 0 or ( + args.validate_at_start and epoch == 0 + ): + run_test_eval( + evaluator, + model, + eval_dataloaders, + logger.writer, + total_steps, + ) + model.train() + torch.cuda.empty_cache() + + self.barrier() + if total_steps > args.num_steps: + should_keep_training = False + break + + print("FINISHED TRAINING") + + PATH = f"{args.ckpt_path}/{args.model_name}_final.pth" + torch.save(model.module.module.state_dict(), PATH) + run_test_eval(evaluator, model, eval_dataloaders, logger.writer, total_steps) + logger.close() + + +if __name__ == "__main__": + signal.signal(signal.SIGUSR1, sig_handler) + signal.signal(signal.SIGTERM, term_handler) + parser = argparse.ArgumentParser() + parser.add_argument("--model_name", default="cotracker", help="model name") + parser.add_argument("--restore_ckpt", help="restore checkpoint") + parser.add_argument("--ckpt_path", help="restore checkpoint") + parser.add_argument( + "--batch_size", type=int, default=4, help="batch size used during training." + ) + parser.add_argument( + "--num_workers", type=int, default=6, help="left right consistency loss" + ) + + parser.add_argument( + "--mixed_precision", action="store_true", help="use mixed precision" + ) + parser.add_argument("--lr", type=float, default=0.0005, help="max learning rate.") + parser.add_argument( + "--wdecay", type=float, default=0.00001, help="Weight decay in optimizer." + ) + parser.add_argument( + "--num_steps", type=int, default=200000, help="length of training schedule." + ) + parser.add_argument( + "--evaluate_every_n_epoch", + type=int, + default=1, + help="number of flow-field updates during validation forward pass", + ) + parser.add_argument( + "--save_every_n_epoch", + type=int, + default=1, + help="number of flow-field updates during validation forward pass", + ) + parser.add_argument( + "--validate_at_start", action="store_true", help="use mixed precision" + ) + parser.add_argument("--save_freq", type=int, default=100, help="save_freq") + parser.add_argument("--traj_per_sample", type=int, default=768, help="save_freq") + parser.add_argument("--dataset_root", type=str, help="path lo all the datasets") + + parser.add_argument( + "--train_iters", + type=int, + default=4, + help="number of updates to the disparity field in each forward pass.", + ) + parser.add_argument( + "--sequence_len", type=int, default=8, help="train sequence length" + ) + parser.add_argument( + "--eval_datasets", + nargs="+", + default=["things", "badja", "fastcapture"], + help="eval datasets.", + ) + + parser.add_argument( + "--remove_space_attn", action="store_true", help="use mixed precision" + ) + parser.add_argument( + "--dont_use_augs", action="store_true", help="use mixed precision" + ) + parser.add_argument( + "--sample_vis_1st_frame", action="store_true", help="use mixed precision" + ) + parser.add_argument( + "--sliding_window_len", type=int, default=8, help="use mixed precision" + ) + parser.add_argument( + "--updateformer_hidden_size", type=int, default=384, help="use mixed precision" + ) + parser.add_argument( + "--updateformer_num_heads", type=int, default=8, help="use mixed precision" + ) + parser.add_argument( + "--updateformer_space_depth", type=int, default=12, help="use mixed precision" + ) + parser.add_argument( + "--updateformer_time_depth", type=int, default=12, help="use mixed precision" + ) + parser.add_argument( + "--model_stride", type=int, default=8, help="use mixed precision" + ) + parser.add_argument( + 
"--crop_size", + type=int, + nargs="+", + default=[384, 512], + help="use mixed precision", + ) + parser.add_argument( + "--eval_max_seq_len", type=int, default=1000, help="use mixed precision" + ) + args = parser.parse_args() + + logging.basicConfig( + level=logging.INFO, + format="%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", + ) + + Path(args.ckpt_path).mkdir(exist_ok=True, parents=True) + from pytorch_lightning.strategies import DDPStrategy + + Lite( + strategy=DDPStrategy(find_unused_parameters=True), + devices="auto", + accelerator="gpu", + precision=32, + num_nodes=4, + ).run(args)