From f595afe6c8bd1a11599d5ca0edfab7ef1c221199 Mon Sep 17 00:00:00 2001 From: allegroai <> Date: Mon, 10 Jun 2019 20:00:28 +0300 Subject: [PATCH] Initial beta version --- README.md | 218 + docs/contributing.md | 54 + docs/faq.md | 160 + examples/absl_example.py | 43 + examples/jupyter.ipynb | 160 + examples/keras_tensorboard.py | 113 + examples/manual_model_config.py | 29 + examples/manual_reporting.py | 51 + examples/matplotlib_example.py | 36 + examples/pytorch_matplotlib.py | 479 + examples/pytorch_mnist.py | 124 + examples/pytorch_tensorboard.py | 126 + examples/pytorch_tensorboardX.py | 126 + examples/samples/dancing.jpg | Bin 0 -> 40484 bytes examples/samples/picasso.jpg | Bin 0 -> 114573 bytes examples/tensorboard_pr_curve.py | 237 + examples/tensorboard_toy.py | 76 + examples/tensorflow_eager.py | 358 + examples/tensorflow_mnist.py | 171 + examples/trains.conf | 131 + requirements.txt | 31 + setup.cfg | 4 + setup.py | 77 + trains/__init__.py | 7 + trains/backend_api/__init__.py | 3 + trains/backend_api/config/__init__.py | 16 + trains/backend_api/config/default/api.conf | 41 + .../backend_api/config/default/logging.conf | 9 + trains/backend_api/schema/__init__.py | 0 trains/backend_api/schema/action.py | 38 + trains/backend_api/schema/service.py | 201 + trains/backend_api/services/__init__.py | 22 + trains/backend_api/services/v2_1/__init__.py | 0 .../services/v2_1/async_request.py | 414 + trains/backend_api/services/v2_1/auth.py | 1112 +++ trains/backend_api/services/v2_1/debug.py | 194 + trains/backend_api/services/v2_1/events.py | 2846 ++++++ trains/backend_api/services/v2_1/models.py | 2675 ++++++ trains/backend_api/services/v2_1/news.py | 70 + trains/backend_api/services/v2_1/projects.py | 1847 ++++ trains/backend_api/services/v2_1/storage.py | 681 ++ trains/backend_api/services/v2_1/tasks.py | 8460 +++++++++++++++++ trains/backend_api/session/__init__.py | 7 + trains/backend_api/session/apimodel.py | 8 + trains/backend_api/session/callresult.py | 131 + trains/backend_api/session/datamodel.py | 145 + trains/backend_api/session/defs.py | 7 + trains/backend_api/session/errors.py | 17 + trains/backend_api/session/request.py | 76 + trains/backend_api/session/response.py | 49 + trains/backend_api/session/session.py | 425 + trains/backend_api/session/token_manager.py | 95 + trains/backend_api/utils.py | 86 + trains/backend_api/version.py | 1 + trains/backend_config/__init__.py | 4 + trains/backend_config/bucket_config.py | 291 + trains/backend_config/config.py | 412 + trains/backend_config/converters.py | 46 + trains/backend_config/defs.py | 53 + trains/backend_config/entry.py | 96 + trains/backend_config/environment.py | 25 + trains/backend_config/errors.py | 5 + trains/backend_config/log.py | 30 + trains/backend_config/reloader.py | 32 + trains/backend_config/utils.py | 9 + trains/backend_interface/__init__.py | 2 + trains/backend_interface/base.py | 147 + trains/backend_interface/metrics/__init__.py | 4 + trains/backend_interface/metrics/events.py | 258 + trains/backend_interface/metrics/interface.py | 192 + trains/backend_interface/metrics/reporter.py | 457 + trains/backend_interface/model.py | 408 + trains/backend_interface/session.py | 28 + trains/backend_interface/setupuploadmixin.py | 43 + trains/backend_interface/task/__init__.py | 1 + trains/backend_interface/task/access.py | 85 + trains/backend_interface/task/args.py | 314 + .../task/development/__init__.py | 0 .../task/development/stop_signal.py | 48 + .../task/development/worker.py | 26 + 
trains/backend_interface/task/log.py | 110 + .../backend_interface/task/repo/__init__.py | 2 + .../backend_interface/task/repo/detectors.py | 248 + trains/backend_interface/task/repo/freeze.py | 11 + .../backend_interface/task/repo/scriptinfo.py | 162 + trains/backend_interface/task/repo/util.py | 12 + trains/backend_interface/task/task.py | 811 ++ trains/backend_interface/util.py | 77 + trains/config/__init__.py | 64 + trains/config/cache.py | 40 + trains/config/default/__init__.py | 1 + trains/config/default/__main__.py | 132 + trains/config/default/logging.conf | 27 + trains/config/default/sdk.conf | 126 + trains/config/defs.py | 31 + trains/config/remote.py | 17 + trains/debugging/__init__.py | 4 + trains/debugging/log.py | 181 + trains/debugging/timer.py | 112 + trains/errors.py | 3 + trains/logger.py | 684 ++ trains/model.py | 1006 ++ trains/storage/__init__.py | 2 + trains/storage/helper.py | 1361 +++ trains/storage/util.py | 18 + trains/task.py | 1192 +++ trains/task_parameters.py | 163 + trains/utilities/__init__.py | 1 + trains/utilities/absl_bind.py | 88 + trains/utilities/args.py | 176 + trains/utilities/async_manager.py | 53 + trains/utilities/config.py | 46 + trains/utilities/deferred.py | 121 + trains/utilities/dicts.py | 98 + trains/utilities/enum.py | 24 + trains/utilities/frameworks.py | 1611 ++++ trains/utilities/matplotlib_bind.py | 188 + trains/utilities/plotly.py | 353 + trains/utilities/py3_interop.py | 39 + trains/utilities/seed.py | 76 + trains/version.py | 1 + 121 files changed, 34975 insertions(+) create mode 100644 README.md create mode 100644 docs/contributing.md create mode 100644 docs/faq.md create mode 100644 examples/absl_example.py create mode 100644 examples/jupyter.ipynb create mode 100644 examples/keras_tensorboard.py create mode 100644 examples/manual_model_config.py create mode 100644 examples/manual_reporting.py create mode 100644 examples/matplotlib_example.py create mode 100644 examples/pytorch_matplotlib.py create mode 100644 examples/pytorch_mnist.py create mode 100644 examples/pytorch_tensorboard.py create mode 100644 examples/pytorch_tensorboardX.py create mode 100644 examples/samples/dancing.jpg create mode 100644 examples/samples/picasso.jpg create mode 100644 examples/tensorboard_pr_curve.py create mode 100644 examples/tensorboard_toy.py create mode 100644 examples/tensorflow_eager.py create mode 100644 examples/tensorflow_mnist.py create mode 100644 examples/trains.conf create mode 100644 requirements.txt create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 trains/__init__.py create mode 100644 trains/backend_api/__init__.py create mode 100644 trains/backend_api/config/__init__.py create mode 100644 trains/backend_api/config/default/api.conf create mode 100644 trains/backend_api/config/default/logging.conf create mode 100644 trains/backend_api/schema/__init__.py create mode 100644 trains/backend_api/schema/action.py create mode 100644 trains/backend_api/schema/service.py create mode 100644 trains/backend_api/services/__init__.py create mode 100644 trains/backend_api/services/v2_1/__init__.py create mode 100644 trains/backend_api/services/v2_1/async_request.py create mode 100644 trains/backend_api/services/v2_1/auth.py create mode 100644 trains/backend_api/services/v2_1/debug.py create mode 100644 trains/backend_api/services/v2_1/events.py create mode 100644 trains/backend_api/services/v2_1/models.py create mode 100644 trains/backend_api/services/v2_1/news.py create mode 100644 
trains/backend_api/services/v2_1/projects.py create mode 100644 trains/backend_api/services/v2_1/storage.py create mode 100644 trains/backend_api/services/v2_1/tasks.py create mode 100644 trains/backend_api/session/__init__.py create mode 100644 trains/backend_api/session/apimodel.py create mode 100644 trains/backend_api/session/callresult.py create mode 100644 trains/backend_api/session/datamodel.py create mode 100644 trains/backend_api/session/defs.py create mode 100644 trains/backend_api/session/errors.py create mode 100644 trains/backend_api/session/request.py create mode 100644 trains/backend_api/session/response.py create mode 100644 trains/backend_api/session/session.py create mode 100644 trains/backend_api/session/token_manager.py create mode 100644 trains/backend_api/utils.py create mode 100644 trains/backend_api/version.py create mode 100644 trains/backend_config/__init__.py create mode 100644 trains/backend_config/bucket_config.py create mode 100644 trains/backend_config/config.py create mode 100644 trains/backend_config/converters.py create mode 100644 trains/backend_config/defs.py create mode 100644 trains/backend_config/entry.py create mode 100644 trains/backend_config/environment.py create mode 100644 trains/backend_config/errors.py create mode 100644 trains/backend_config/log.py create mode 100644 trains/backend_config/reloader.py create mode 100644 trains/backend_config/utils.py create mode 100644 trains/backend_interface/__init__.py create mode 100644 trains/backend_interface/base.py create mode 100644 trains/backend_interface/metrics/__init__.py create mode 100644 trains/backend_interface/metrics/events.py create mode 100644 trains/backend_interface/metrics/interface.py create mode 100644 trains/backend_interface/metrics/reporter.py create mode 100644 trains/backend_interface/model.py create mode 100644 trains/backend_interface/session.py create mode 100644 trains/backend_interface/setupuploadmixin.py create mode 100644 trains/backend_interface/task/__init__.py create mode 100644 trains/backend_interface/task/access.py create mode 100644 trains/backend_interface/task/args.py create mode 100644 trains/backend_interface/task/development/__init__.py create mode 100644 trains/backend_interface/task/development/stop_signal.py create mode 100644 trains/backend_interface/task/development/worker.py create mode 100644 trains/backend_interface/task/log.py create mode 100644 trains/backend_interface/task/repo/__init__.py create mode 100644 trains/backend_interface/task/repo/detectors.py create mode 100644 trains/backend_interface/task/repo/freeze.py create mode 100644 trains/backend_interface/task/repo/scriptinfo.py create mode 100644 trains/backend_interface/task/repo/util.py create mode 100644 trains/backend_interface/task/task.py create mode 100644 trains/backend_interface/util.py create mode 100644 trains/config/__init__.py create mode 100644 trains/config/cache.py create mode 100644 trains/config/default/__init__.py create mode 100644 trains/config/default/__main__.py create mode 100644 trains/config/default/logging.conf create mode 100644 trains/config/default/sdk.conf create mode 100644 trains/config/defs.py create mode 100644 trains/config/remote.py create mode 100644 trains/debugging/__init__.py create mode 100644 trains/debugging/log.py create mode 100644 trains/debugging/timer.py create mode 100644 trains/errors.py create mode 100644 trains/logger.py create mode 100644 trains/model.py create mode 100644 trains/storage/__init__.py create mode 100644 
trains/storage/helper.py create mode 100644 trains/storage/util.py create mode 100644 trains/task.py create mode 100644 trains/task_parameters.py create mode 100644 trains/utilities/__init__.py create mode 100644 trains/utilities/absl_bind.py create mode 100644 trains/utilities/args.py create mode 100644 trains/utilities/async_manager.py create mode 100644 trains/utilities/config.py create mode 100644 trains/utilities/deferred.py create mode 100644 trains/utilities/dicts.py create mode 100644 trains/utilities/enum.py create mode 100644 trains/utilities/frameworks.py create mode 100644 trains/utilities/matplotlib_bind.py create mode 100644 trains/utilities/plotly.py create mode 100644 trains/utilities/py3_interop.py create mode 100644 trains/utilities/seed.py create mode 100644 trains/version.py diff --git a/README.md b/README.md new file mode 100644 index 00000000..d4ca762b --- /dev/null +++ b/README.md @@ -0,0 +1,218 @@ +# TRAINS - Magic Version Control & Experiment Manager for AI + +

"Because it’s a jungle out there"

+
+Behind every great scientist are great repeatable methods. Sadly, this is easier said than done.
+
+When talented scientists, engineers, or developers work on their own, a mess may be unavoidable. Yet, it may still be
+manageable. However, with time and more people joining your project,
+managing the clutter takes its toll on productivity.
+As your project moves toward production,
+visibility and provenance for scaling your deep-learning efforts are a must, but both
+suffer as your team grows.
+
+For teams or entire companies, TRAINS logs everything in one central server and takes on the responsibilities for visibility and provenance
+so productivity does not suffer.
+TRAINS records and manages various deep learning research workloads and does so with unbelievably small integration costs.
+
+TRAINS is an auto-magical experiment manager that you can use productively with minimal integration while
+preserving your existing methods and practices. Use it on a daily basis to boost collaboration and visibility,
+or use it to automatically collect your experimentation logs, outputs, and data into one centralized server for provenance.
+
+![nice image here? Maybe collection of all the projects or main screen, this can also be an inspirational image insinuating clutter](img/woman-3441018_1920.jpg)
+
+## Why Should I Use TRAINS?
+
+TRAINS is our solution to a problem we share with countless other researchers and developers in the
+machine learning/deep learning universe.
+Training production-grade deep learning models is a glorious but messy process.
+We built TRAINS to solve that problem. TRAINS tracks and controls the process by associating code version control, research projects, performance metrics, and model provenance.
+TRAINS removes the mess but leaves the glory.
+
+Choose TRAINS because...
+
+* Sharing experiments with the team is difficult and gets even more difficult further up the chain.
+* Like all of us, you lost a model and are left with no repeatable process.
+* You set up a central location for TensorBoard and it exploded with a gazillion experiments.
+* You accidentally threw away important results while trying to manually clean up the clutter.
+* You do not associate the training code commit with the model or TensorBoard logs.
+* You are storing model parameters in the checkpoint filename.
+* You cannot find any other tool for comparing results, hyper-parameters, and code commits.
+* TRAINS requires **only two lines of code** for full integration.
+* TRAINS is **free**.
+
+## Main Features
+
+* Seamless integration with leading frameworks, including PyTorch, TensorFlow, Keras, and others coming soon!
+* Track everything with two lines of code (see the short sketch after this list).
+* Model logging that automatically associates models with the code and the parameters used to train them, including initial weights logging.
+* Multi-user process tracking and collaboration.
+* Management capabilities including project management, filter-by-metric, and detailed experiment comparison.
+* Centralized server for aggregating logs, records, and general bookkeeping.
+* Automatically create a copy of models on centralized storage (TRAINS supports shared folders, S3, and GS; Azure is coming soon!).
+* Support for Jupyter Notebook (see the [trains-jupyter-plugin]()) and PyCharm remote debugging (see the [trains-pycharm-plugin]()).
+* A field-tested, feature-rich SDK for your on-the-fly customization needs.
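+
+To make the two-line claim concrete, here is a minimal sketch that combines the basic integration with one
+manually reported scalar and a connected hyper-parameter dictionary. The project name, task name, and values
+below are placeholders only; the calls themselves are the ones used in the examples and FAQ of this repository:
+
+```python
+from trains import Task
+
+# Two-line integration: register this run as an experiment on the trains-server
+task = Task.init(project_name='examples', task_name='my first experiment')
+
+# A plain dictionary connected to the task is logged as hyper-parameters,
+# and later changes to it are tracked as well
+parameters = {'batch_size': 64, 'learning_rate': 0.001}
+parameters = task.connect(parameters)
+
+# Anything that is not captured automatically can be reported through the SDK Logger
+logger = Task.current_task().get_logger()
+logger.report_scalar("loss", "classification", iteration=42, value=1.337)
+```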
+
+## TRAINS Magically Logs
+
+TRAINS magically logs the following:
+
+* Git repository, branch, and commit id
+* Hyper-parameters, including:
+    * ArgParser command line parameters, with the values actually used
+    * TensorFlow Defines (absl-py)
+    * Manually passed parameter dictionary
+* Initial model weights file
+* Model snapshots
+* stdout and stderr
+* TensorBoard scalars, metrics, histograms, and images (audio coming soon); tensorboardX is supported as well
+* Matplotlib
+
+## See for Yourself
+
+We have a demo server up and running at [https://demoapp.trainsai.io](https://demoapp.trainsai.io) (it resets every 24 hours and all of the data is deleted).
+You can test your code with it:
+
+1. Install TRAINS
+
+        pip install trains
+
+1. Add the following to your code:
+
+        from trains import Task
+        task = Task.init(project_name="my_project", task_name="my_task")
+
+1. Run your code. When TRAINS connects to the server, a link to the experiment page is printed.
+
+1. In the Web-App, view your parameters, model, and TensorBoard metrics.
+
+    ![GIF screen-shot here. If the GIF looks bad, a few PNG screen grabs:
+    Home Page
+    Projects Page
+    Experiment Page with experiment open tab execution
+    Experiment Page with experiment open tab model
+    Experiment Page with experiment open tab results
+    Results Page
+    Comparison Page
+    Parameters
+    Graphs
+    Images
+    Experiment Models Page]
+
+## How TRAINS Works
+
+TRAINS is composed of the following:
+
+* the [trains-server]()
+* the [Web-App]() (web user interface)
+* the Python SDK (auto-magically connects your code, see [Using TRAINS (Example)](#using-trains-example))
+
+The following diagram illustrates the interaction between the TRAINS-server and a GPU machine:
+    TRAINS-server
+    
+    +--------------------------------------------------------------------+
+    |                                                                    |
+    |   Server Docker                   Elastic Docker     Mongo Docker  |
+    |  +-------------------------+     +---------------+  +------------+ |
+    |  |     Pythonic Server     |     |               |  |            | |
+    |  |   +-----------------+   |     | ElasticSearch |  |  MongoDB   | |
+    |  |   |   WEB server    |   |     |               |  |            | |
+    |  |   |   Port 8080     |   |     |               |  |            | |
+    |  |   +--------+--------+   |     |               |  |            | |
+    |  |            |            |     |               |  |            | |
+    |  |   +--------+--------+   |     |               |  |            | |
+    |  |   |   API server    +----------------------------+            | |
+    |  |   |   Port 8008     +---------+               |  |            | |
+    |  |   +-----------------+   |     +-------+-------+  +-----+------+ |
+    |  |                         |             |                |        |
+    |  |   +-----------------+   |         +---+----------------+------+ |
+    |  |   |   File Server   +-------+     |    Host Storage           | |
+    |  |   |   Port 8081     |   |   +-----+                           | |
+    |  |   +-----------------+   |         +---------------------------+ |
+    |  +------------+------------+                                       |
+    +---------------|----------------------------------------------------+
+                    |HTTP
+                    +--------+
+    GPU Machine              |
+    +------------------------|-------------------------------------------+
+    |     +------------------|--------------+                            |
+    |     |  Training        |              |    +---------------------+ |
+    |     |  Code        +---+------------+ |    | TRAINS configuration| |
+    |     |              | TRAINS - SDK   | |    | ~/trains.conf       | |
+    |     |              |                +------+                     | |
+    |     |              +----------------+ |    +---------------------+ |
+    |     +---------------------------------+                            |
+    +--------------------------------------------------------------------+
+
+
+## Installing and Configuring TRAINS
+
+1. Install the trains-server docker (see [Installing the TRAINS Server](../trains_server)).
+
+1. Install the TRAINS package:
+
+        pip install trains
+
+1. Run the initial configuration wizard to set up the connection to the trains-server (IP:port and user credentials):
+
+        trains-init
+
+After installing and configuring, your configuration file is `~/trains.conf`. View a sample configuration file [here]([link to git]).
+
+## Using TRAINS (Example)
+
+Add these two lines of code to your script:
+
+    from trains import Task
+    task = Task.init(project_name, task_name)
+
+* If no project name is provided, then the repository name is used.
+* If no task (experiment) name is provided, then the main filename is used as the experiment name.
+
+Executing your script prints a direct link to the currently running experiment page, for example:
+
+```bash
+TRAINS Metrics page:
+
+https://demoapp.trainsai.io/projects/76e5e2d45e914f52880621fe64601e85/experiments/241f06ae0f5c4b27b8ce8b64890ce152/output/log
+```
+
+*[Add GIF screenshots here]*
+
+For more examples and use cases, see [examples](link docs/examples/).
+
+## Who Supports TRAINS?
+
+The people behind *allegro.ai*.
+We build deep learning pipelines and infrastructure for enterprise companies.
+We built TRAINS to track and control the glorious
+but messy process of training production-grade deep learning models.
+We are committed to vigorously supporting and expanding the capabilities of TRAINS,
+not only because it is our beloved creation, but also because we use it daily.
+
+## Why Are We Releasing TRAINS?
+
+We believe TRAINS is ground-breaking. We wish to establish new standards of experiment management in
+machine- and deep-learning.
+Only the greater community can help us do that.
+
+We promise to always be backward compatible. If you start working with TRAINS today, even though this code is still in the beta stage, your logs and data will always upgrade with you.
+
+## License
+
+Apache License, Version 2.0 (see the [LICENSE](https://www.apache.org/licenses/LICENSE-2.0.html) for more information)
+
+## Guidelines for Contributing
+
+See the TRAINS [Guidelines for Contributing](contributing.md).
+
+## FAQ
+
+See the TRAINS [FAQ](faq.md).
+

May the force (and the goddess of learning rates) be with you!

+ diff --git a/docs/contributing.md b/docs/contributing.md new file mode 100644 index 00000000..7f17422c --- /dev/null +++ b/docs/contributing.md @@ -0,0 +1,54 @@ +# Guidelines for Contributing + +Firstly, we thank you for taking the time to contribute! + +The following is a set of guidelines for contributing to TRAINS. +These are primarily guidelines, not rules. +Use your best judgment and feel free to propose changes to this document in a pull request. + +## Reporting Bugs + +This section guides you through submitting a bug report for TRAINS. +By following these guidelines, you +help maintainers and the community understand your report, reproduce the behavior, and find related reports. + +Before creating bug reports, please check whether the bug you want to report already appears [here](link to issues). +You may discover that you do not need to create a bug report. +When you are creating a bug report, please include as much detail as possible. + +**Note**: If you find a **Closed** issue that may be the same issue which you are currently experiencing, +then open a **New** issue and include a link to the original (Closed) issue in the body of your new one. + +Explain the problem and include additional details to help maintainers reproduce the problem: + +* **Use a clear and descriptive title** for the issue to identify the problem. +* **Describe the exact steps necessary to reproduce the problem** in as much detail as possible. Please do not just summarize what you did. Make sure to explain how you did it. +* **Provide the specific environment setup.** Include the `pip freeze` output, specific environment variables, Python version, and other relevant information. +* **Provide specific examples to demonstrate the steps.** Include links to files or GitHub projects, or copy/paste snippets which you use in those examples. +* **If you are reporting any TRAINS crash,** include a crash report with a stack trace from the operating system. Make sure to add the crash report in the issue and place it in a [code block](https://help.github.com/en/articles/getting-started-with-writing-and-formatting-on-github#multiple-lines), +a [file attachment](https://help.github.com/articles/file-attachments-on-issues-and-pull-requests/), or just put it in a [gist](https://gist.github.com/) (and provide link to that gist). +* **Describe the behavior you observed after following the steps** and the exact problem with that behavior. +* **Explain which behavior you expected to see and why.** +* **For Web-App issues, please include screenshots and animated GIFs** which recreate the described steps and clearly demonstrate the problem. You can use [LICEcap](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [silentcast](https://github.com/colinkeenan/silentcast) or [byzanz](https://github.com/threedaymonk/byzanz) on Linux. + +## Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for TRAINS, including +completely new features and minor improvements to existing functionality. +By following these guidelines, you help maintainers and the community understand your suggestion and find related suggestions. + +Enhancement suggestions are tracked as GitHub issues. After you determine which repository your enhancement suggestion is related to, create an issue on that repository and provide the following: + +* **A clear and descriptive title** for the issue to identify the suggestion. 
+* **A step-by-step description of the suggested enhancement** in as much detail as possible.
+* **Specific examples to demonstrate the steps.** Include copy/pasteable snippets which you use in those examples as [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines).
+* **Describe the current behavior and explain which behavior you expected to see instead and why.**
+* **Include screenshots or animated GIFs** which help you demonstrate the steps or point out the part of TRAINS which the suggestion is related to. You can use [LICEcap](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [silentcast](https://github.com/colinkeenan/silentcast) or [byzanz](https://github.com/threedaymonk/byzanz) on Linux.
+
diff --git a/docs/faq.md b/docs/faq.md
new file mode 100644
index 00000000..5eb8c0fa
--- /dev/null
+++ b/docs/faq.md
@@ -0,0 +1,160 @@
+# FAQ
+
+**Can I store more information on the models? For example, can I store enumeration of classes?**
+
+YES!
+
+Use the SDK `set_model_label_enumeration` method:
+
+```python
+Task.current_task().set_model_label_enumeration({'label': int(0)})
+```
+
+**Can I store the model configuration file as well?**
+
+YES!
+
+Use the SDK `set_model_design` method:
+
+```python
+Task.current_task().set_model_design('a very long text of the configuration file content')
+```
+
+**I want to add more graphs, not just with TensorBoard. Is this supported?**
+
+YES!
+
+Use an SDK [Logger](link to git) object. An instance can always be retrieved with `Task.current_task().get_logger()`:
+
+```python
+logger = Task.current_task().get_logger()
+logger.report_scalar("loss", "classification", iteration=42, value=1.337)
+```
+
+TRAINS supports scalars, plots, 2d/3d scatter diagrams, histograms, surface diagrams, confusion matrices, images, and text logging.
+
+An example can be found [here](docs/manual_log.py).
+
+**I noticed that all of my experiments appear as "Training". Are there other options?**
+
+YES!
+
+When creating experiments and calling `Task.init`, you can pass an experiment type.
+The currently supported types are `Task.TaskTypes.training` and `Task.TaskTypes.testing`:
+
+```python
+task = Task.init(project_name, task_name, Task.TaskTypes.testing)
+```
+
+If you feel we should add a few more, let us know in the [issues]() section.
+
+**I noticed I keep getting a message "warning: uncommitted code". What does it mean?**
+
+TRAINS not only detects your current repository and git commit,
+but it also warns you if you are using uncommitted code. TRAINS does this
+because uncommitted code makes it difficult to reproduce the experiment.
+
+**Is there anything you can do about running uncommitted code?**
+
+YES!
+
+TRAINS currently stores the git diff together with the project.
+The Web-App will present the git diff as well; this is coming very soon!
+
+**I read that there is a feature for centralized model storage. How do I use it?**
+
+Pass the `output_uri` parameter to `Task.init`, for example:
+
+```python
+Task.init(project_name, task_name, output_uri='/mnt/shared/folder')
+```
+
+All of the stored snapshots are copied into a subfolder whose name contains the task ID, for example:
+
+`/mnt/shared/folder/task_6ea4f0b56d994320a713aeaf13a86d9d/models/`
+
+Other options include:
+
+```python
+Task.init(project_name, task_name, output_uri='s3://bucket/folder')
+```
+
+```python
+Task.init(project_name, task_name, output_uri='gs://bucket/folder')
+```
+
+These require configuring the cloud storage credentials in `~/trains.conf` (see an [example](v)).
+
+**I am training multiple models at the same time, but I only see one of them. What happened?**
+
+This will be fixed in a future version. Currently, TRAINS does support multiple models
+from the same task/experiment, so you can find all the models in the project's Models tab.
+In the Task view, we only present the last one.
+
+**Can I log input and output models manually?**
+
+YES!
+
+See [InputModel]() and [OutputModel]().
+
+For example:
+
+```python
+input_model = InputModel.import_model(link_to_initial_model_file)
+Task.current_task().connect(input_model)
+OutputModel(Task.current_task()).update_weights(link_to_new_model_file_here)
+```
+
+**I am using Jupyter Notebook. Is this supported?**
+
+YES!
+
+Jupyter Notebook is supported.
+
+**I do not use ArgParser for hyper-parameters. Do you have a solution?**
+
+YES!
+
+TRAINS supports using a Python dictionary for hyper-parameter logging:
+
+```python
+parameters_dict = Task.current_task().connect(parameters_dict)
+```
+
+From this point onward, not only are the dictionary key/value pairs stored, but any change to the dictionary is automatically stored as well.
+
+**Git is not well supported in Jupyter. We just gave up on properly committing our code. Do you have a solution?**
+
+YES!
+
+Check our [trains-jupyter-plugin](). It is a Jupyter plugin that allows you to commit your notebook directly from Jupyter. It also saves a Python version of the code and creates an updated `requirements.txt` so you know which packages you were using.
+
+**Can I use TRAINS with scikit-learn?**
+
+YES!
+
+scikit-learn is supported. Everything you do is logged, with the caveat that models are not logged automatically.
+Models are not logged automatically because, in most cases, scikit-learn simply pickles the object to a file, so there is no underlying framework to connect to.
+
+**I am working with PyCharm and remotely debugging a machine, but the git repo is not detected. Do you have a solution?**
+
+YES!
+
+This is such a common occurrence that we created a PyCharm plugin that allows a remote debugger to grab your local repository / commit ID. See our [trains-pycharm-plugin]() repository for instructions and the [latest release]().
+
+**How do I know a new version came out?**
+
+Unfortunately, TRAINS currently does not support auto-update checks. We hope to add this soon.
+
+**Sometimes I see experiments marked as running when they are not. Why is that?**
+
+When the Python process exits in an orderly fashion, TRAINS closes the experiment.
+If a process crashes, then sometimes the stop signal is missed. You can safely right-click the experiment in the Web-App and stop it.
+
+**In the experiment log tab, I'm missing the first log lines. Where are they?**
+
+Unfortunately, due to speed/optimization issues, we opted to display only the last several hundred lines. The full log can be downloaded from the Web-App.
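+
+Putting a few of the answers above together, a minimal end-to-end sketch might look like the following.
+The project name, task name, shared-folder path, and weights file path are placeholders, and the
+`from trains import Task, OutputModel` import line is an assumption; the individual calls are the ones
+shown in the answers above:
+
+```python
+from trains import Task, OutputModel
+
+# Store model snapshots on centralized storage (a shared folder here; s3:// and gs:// URIs also work)
+task = Task.init('examples', 'faq walkthrough', output_uri='/mnt/shared/folder')
+
+# Log hyper-parameters from a plain dictionary; later changes are tracked automatically
+parameters = task.connect({'batch_size': 64, 'epochs': 10})
+
+# ... training loop goes here ...
+
+# Manually register an output model if a snapshot is not picked up automatically
+OutputModel(task).update_weights('/mnt/data/my_model.weights')
+```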
+ + + + diff --git a/examples/absl_example.py b/examples/absl_example.py new file mode 100644 index 00000000..e2269666 --- /dev/null +++ b/examples/absl_example.py @@ -0,0 +1,43 @@ +# TRAINS - example code, absl logging +# +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys + +from absl import app +from absl import flags +from absl import logging + +from trains import Task + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('echo', None, 'Text to echo.') +flags.DEFINE_string('another_str', 'My string', 'A string', module_name='test') + +task = Task.init(project_name='examples', task_name='absl example') + +flags.DEFINE_integer('echo3', 3, 'Text to echo.') +flags.DEFINE_string('echo5', '5', 'Text to echo.', module_name='test') + + +parameters = { + 'list': [1, 2, 3], + 'dict': {'a': 1, 'b': 2}, + 'int': 3, + 'float': 2.2, + 'string': 'my string', +} +parameters = task.connect(parameters) + + +def main(_): + print('Running under Python {0[0]}.{0[1]}.{0[2]}'.format(sys.version_info), file=sys.stderr) + logging.info('echo is %s.', FLAGS.echo) + + +if __name__ == '__main__': + app.run(main) diff --git a/examples/jupyter.ipynb b/examples/jupyter.ipynb new file mode 100644 index 00000000..99325b89 --- /dev/null +++ b/examples/jupyter.ipynb @@ -0,0 +1,160 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "pycharm": { + "is_executing": false + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "TRAINS Task: created new task id=e8fc2b809a384c3f8ec3ded54a2aae44\n", + "TRAINS results page: http://ec2-3-218-72-191.compute-1.amazonaws.com:8080/projects/ec4476fb59c64d89af880ff0445c836b/experiments/e8fc2b809a384c3f8ec3ded54a2aae44/output/log\n" + ] + } + ], + "source": [ + "from trains import Task\n", + "task = Task.init(project_name='examples', task_name='Jupyter exmaple')" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import matplotlib\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXcAAAD8CAYAAACMwORRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzsvXmQHOd5p/l8edZ9dvWNRuMGAfDAQUIED/GSREqUaNmSLHk0Pteyd2zPTuzsbMxEbGxMTMxsTOyud7zhcNgrT8iybOu+LJG6KPEmARAAcZ/E0UAf6Ku6667Kqsr89o8CwAbQ6LOqL+QTwQiiOyvz7e6qX375fu/7e4WUEhcXFxeXlYWy2AG4uLi4uNQfV9xdXFxcViCuuLu4uLisQFxxd3FxcVmBuOLu4uLisgJxxd3FxcVlBeKKu4uLi8sKxBV3FxcXlxWIK+4uLi4uKxBtsS7c1NQku7u7F+vyLi4uLsuSQ4cOjUopE9Mdt2ji3t3dzcGDBxfr8i4uLi7LEiHE5Zkc56ZlXFxcXFYgrri7uLi4rEBccXdxcXFZgbji7uLi4rICccXdxcXFZQWyaNUyLi71plAtkCyPM2olGbJGqdgVBODVvLR6WogZEeJmDEPRFztUF5eG44q7y7LGkQ5XS0OcSJ3hYv4yQoAEPMJAEbUH06o1wrncBRQUVKGyLbyZzcENRIzw4gbv4tJAXHF3WbakKxleH36HvuIApmIQNyI3BP1OVJ0qR1MnOTx+nAci97Izdr+7kndZkbji7rLskFJyNnueN0b2ogiFhBFHCDGj12qKRtyIYkubI6njXMz38NHWJ0mY8QZH7eKysLgbqi7LCiklh1PH+dXwGwQ0PxE9NGNhn4gqVBJmnLJT5of9P+FqcagB0bq4LB6uuLssK06mz7A3eYAmoz4bo0EtgKkYvHj1FyStsTpE6OKyNHDF3WXZkLTGeHN0H3E9iirUup3Xp3rRUHl56DWqTrVu53VxWUxccXdZFlSdKq8Mv4WpGGhK/beKgnqA8XKaw6njdT+3i8ti4Iq7y7LgQq6HEStJSA827BoxI8Kh8aNkK7mGXcPFZaFwxd1lySOl5EjqOAHV19DrXE/1nMteaOh1XFwWAlfcXZY8I1aSsXIKr+pp+LVCWoBj6VPY0m74tZYyUjrIai+ychZZOYWsXkTK4mKH5TILpk1eCiG+AjwPDEspt03yfQH8v8DHgQLwu1LK9+odqMvdy2BpCIGYU8njbDEUg0w1T6qcIW5GG369pYZ0csjKcbBeB5kCOeF3LlSksRthPIhQWxcvSJcZMZOV+1eBZ6f4/nPAhmv/fQn46/mH5eLyAQPFIUzFWLgLSsl4JbVw11siyOpFZPb/htI/g5SgtIPa9sF/Igrlvcjsf8Mp/QIpncUO2WUKphV3KeUbwFQFwC8AX5M19gERIURbvQJ0cRm2RvCo5oJdTxEKw6XRBbveUsCpvI/MfRkwaqKu+G8/SOigtILSAqVfIIs/Rkq54LG6zIx65Nw7gN4J/+679jUXl7pQcqy61rVPhyZUSnZpwa632EhnDApfAxEGJTD9C4QGSgeU30KW3218gC5zYkG9ZYQQX6KWuqGrq2shL+2ynFmExaFcjIsuEtLaD1QnX63fCaGC0gTWr5DGLsQC3nyXCpadoeLksWUFVejoih9TDS12WDeoh7j3A6sm/Lvz2tduQ0r5ZeDLALt27bp7Pj0u88JQdGzpTOv4WC9s6Sxsjn8RkdKC8jsg5mCcJrxg90P1Augb6x/cEsSRNulyDwP5dxkvX0AIhdrqQyClQ8xcT5vvIcLGapRFvuHVQ9x/BPypEOKbwG4gLaW8WofzurgAkDDjjFhJ9AZ0pk6GLW2aPYkFudaiU30fZBnmejMTHmR5L+IuEPd0+Qrn0j/AsrNoiolfa74m7jWkdMiUexmz3sdUI2wK/xohY9UUZ2ws0y6FhBDfAPYCm4QQfUKIPxBC/LEQ4o+vHfIT4CJwHvhb4F81LFqXu5J2bytFx1q4CwpBVL87BnlIewyYR4mp8IG98h01R0tnOD72NaSUBPRWPGr0JmEHEELBo0UJ6G1IaXN87GskS2cXKeIZrNyllF+Y5vsS+JO6RbTCcRyH9EiG8aE0lXIVRREEIn5ibRFM78JVhCwnWjzNtdK8BlMp2hQzJSq2jR41kKZckNr6xcVifnUVKrCyN5/T5cucSX0XjxpBU2bWSGeoARShcSb1Xe6N/fairOCX7bCObKHE+YEklapNZyJMW2xuvt4LgZSSkb4kx14/xYm3TlMtX+9+vCZYQiAdSdvaFh589gHW3Lcaw1z+04GkrFKqXKTqjGPLPAIPquLFo69DU2a+8dTqaSaoBSjZVt1LIotJi8GjKYZPpLHSZSxZJmZE+YrxIwyPztqtHdz38Eba1zShKCuwoVv4gfl049qAt07BLD0cWeVM6vsYanDGwn4dTfHUXp/+Hrua/hRFLKzcLktxP9Fzle+/eZyq41zTR8H29e18as9W1CX2ASxki7z2rbc5+dYZFE0l2hJG02//tUspSY9m+Oe/+hnBaICP/+HTrN6yePm6+VB10uSsI6RLr2HbGWqDTQUIB2q9pgSMHQQ9ezC1rmlvyopQeCC6jTdH9tVN3Kslm0uvDDFwaAwhBJ6wjq/FRDiSteFVeFUPlXKV88d7OX3wEu1rEnzs8w8Ta1lZ6RqhtsyvLkhmQH+oXuEsOVLli1ScPAF9bh25hhogVxkkXb5M1FxX5+imZtmJezpf4ntvHicS8GJeE0lHSg6930d3a4zt65dOif3Vi0N8/y9eopgvkehKoCh3FjEhBMFogGA0QD5d4Jv/9Yc89PHtPP7Zh1HV5VNmli0dYjT/bZA2qhLD1DpvO0ZKm1zlGFnrID5jG82BL6BMsyraGFzH4fHj5KsF/Nr8DMTyIyVOfP0yVrZCoNmDuPZ3KdhFEmb8hoeNbmjEmsNIKUleTfG1//NFnvviI2za3j2v6y8p1DWgxMDJz64UEmqpMllBmCtX3Pvz+9CV+T2ZaIqH/vy+BRf3pbXMnQFn+4axHXlD2AEUIQj7vRw4e2URI7uZgQuDfOO//gChCJpXNU0p7LfiD/toWZ1g/08O88uvvY7jLI8271TxNUZy/4imRDG0DtQ7fCiEUDGUZgy1g2L5FFez/x+2U5jy3IZi8HTL4+TtAs482t4LSYujf9+DXXYItHhvCHtFVtAUjU5v+yTxCiKJEMFYgB999Q3OHrk86+vasspAsZcLubNcyJ1loNiLLRd/MIgQCpgfrvnIzBaZAnV1rWt1BVKsjpEuX8GYRQpxMkwlTKp8iZK9sJYWy27lbpWrTPYUr2sKBWvxPywAuVSe7/23F/H6PfjDc1tlKqpCa3eCw6+eoKkzzs6P3F/nKOtLzjpCMv/PGGobipjZfoEQAkNrx6r2M5z7R1qCvzfla9u9reyI3seh8aMkjPis696dqsOZ7/chbQdv7IP0ji1tyk6FTcH1Uw4CMT06sUSIn/7j2zR3RIkmpv/QV5wyZzInOJ05Rt
mxbjRHCQSGYrIldD+bQlvRF7GuXhgPIMt7wRmtNSbNBFkEWUR4n1+ye13zpezkEGL+hnVCCAQKZTuHR43UKbrpWXYr967mKFLK2zwt0rki96xa/NpkKSW/+qc3qJQqcxb26yiKQqIjzmvfeofk1fE6RVh/HFliNP8dDLV5xsI+EUNppVg+TcE6Oe2xD8V2sC20mZFycta2vP0HxsheLd4k7FVZpeRYrAt0z2gQiOk1UFTBy9/aP62vimWX+OXgixwe34+pmMSMJuJGgriRIGY0YSom743v45dDL2Etot2BEB6E/3drm6vO0PSVSU4OnDHwfRGhrV6QGBcDR1aYV5noTUhsWa7TuWbGshP3VYkIW7paGEhmKJTKWJUqQ2NZfB6T3fcs/hut//wgZw9cIN4Rq8v5dFNHVRXe+t6+upyvEeStUzhOCUXMzW9dCIGqhEmXXp1WMBWh8FjiYR6MbidZHidXzc/oGnbFofetEbyx2gpZIinaRarSZmNgHTFj5va+0USI3vODDPUm73iMIx1eH/kFY+VRmszmSVfmumIQNxKMWSO8MfKLeaWb5otQoojAvwJ1LTgD4AyCrHxwgHTASdY6UgWIwJdQjNscwFcU9a1uEahiYZ/Oll1aRlEEv/H4faw518+Bs1coVao8uGkVe7Z2EwksfknW4V8dxzD1uj6qRlrCnDt0kUwySyjeuDFzc0FKSbr0Kuo885KqCGFV+yjb/ZNuwk5EEQoPxXew2t/JL4feZNhKElB9eFXPHX/vqUt5qkUbM6JTcixsaRM1wnT5VmEos3vaEEKgGSrH956ntWvyNMZgqZ+h0gAxfeo0hxCCiB5jsDTAUGmANu/UP3sjEUoQEfgDpD2ELB+A8v6awF+/3+qbEMajoK29K7xkdMVfG1oi59fvIKVE4qArjZ0kdivLTtwBdFVl9z1d7L5naZmPWUWLswfO09Ren1X7da7XV58/eokdT91X13PPl4o9SLk6gKHOr0qpltvUyFnvTSvu12nxNPO5VZ/iYu4yh1PHGS3Xyhp1oWEqJqpQkNQ2M/svDFNWyghbEtHDtHgSBLXAnD+0gbCfS2cG7vj9M5njGMKc0fmFEBjC5HTm2KKK+4141BaE93mk5+NAGaQNwkQscJ32YuNV4wT0Viw7i6nOfVFVdrIEjU486sIOf7m7/loNZuxqbTdcUeuf7fL4THrPDCw5cbedLEKodXlSUYSHij0yq9fois6m0Ho2BtcxaiUZtcYYKA0yYo1ScaogBF7Vh3c4yNqmCPFwBLMOtfKGqTE8mqVUsPD4bj5fxSkzUOwlqs/cjCugBRko9lJxyou6uTqRWnu9p35p52WGEIIO3x7Opr83L3GvOAXW+z6x4BvPrrjXkbHBFI7dmDZ5b9DD1QtLz8PDoVy3gQ0CFWeOczqFECQ8TSQ8TdwTvt3E6m+dH6D6FAy1Pp2/16sorFJlEnGvzHosYK2iQlBxKktG3F0gZq5HFR6qThFtDvXulWuvi5hrGxDd1Cy7DdWpyKUL5NJT10s3EqtgNWyRo+oaxdzS8/AQ1G9/QeIgRGP8dRRF1H1qkBC1ksZbUYWK5PaKrqmo5WXlgg4lcZkeVTHYGH6Boj2GPXGDeQbYsoxlp9gYegF1DlVk82XFiHtmLMff/ecf8Hf/+QdkUzOroKg3QhGNG/Ig5awaoRYKVQkgqU+VhyNL6OocfMVnQKw5jFWa3YdzKhzHQSDwBW+vEDIUk5AeoeTM/Cmk5BQJ6xEMxTWPW2rEPRvZEPoUheoIVWdmC6yKU6RQHWVD6JPEPOsbHOHkrBhxdxyJYzs4toN0FmcOiD/sa5i5VLlUIdS0tCplAAy1DV1JYDu5eZ2n1rtQIWDsrFNkN9O5voVivn62waW8RVN7BE2/faUthGBr6H6K9syfIot2gS3hB1ZsQ9Byp9W3nS2R36TiFMlVBinbuduezKSUlO0sucogVafE1sgXaPE9sEgRr6Cce6QpyO/8hxcACMVmMAeyAcTaog3bfCpki6y9f/Hr+G9FCIWw9wlGc99Bncn8zTvgyByG1oqpNaYCavWmVvgR8y5ru042XWTnk1vu+P1V/jUcTr1LoZrHp03t2VKo5vGoXlb51sw7LpfGEfds4iHjX5O03qc//w756hACBYlEIJA4+PUW1gQ/SszcgLrIeycrRtwBIjNoB28k0ZYwhtegXCpjeOr7h61YFVZvmXuZ3KiV4Wymj/FyDlva+DUPa/wtrPa3oCnzy/P6jftIih/gSAtlDjlzKSVVJ0XC9/GGrVwT7VHaVsdJJXOEorM0yLqFasVGUQSbpzAQMxSTJ5uf4+XBH5GtZAhowdt+NikluWoWicNHWz6F4W6kLnlUxaTZu42EZyuF6ghlJ4cjKyhCx1AC+LTEknn6WlHivtioqsquj97P2z98l5au+lkhVKwKps+ge9vsV7WXcoO8M3qay/khFCHQFR0BVKXDgeQ5fJrJ7vhmtkfXYc6xkkRVvMT8nyKZ+w6G2j7reuiKM4ypd+M3753T9WeCEIInfm0XX/+Ln+EPelC1ud/QkoMpHnnufvyhqasn4maCZ9s+zd7ka4xawyhCxbx287NkCUc6NJnNPBx/gohR394Il8YihMCvN+OnebFDuSOuuNeZbY9sZu+PD1KxKuh1GrgxNphizwsPzmqAh5SSt0dO8drIMfyqhxZPdNIVRcku86uhI5zKXOGzqx4jqM+tyzdkPoxtpxgvvjxj8zApJRVnCE2J0hL83Tmt+mdD+5oEDz2zlXdfPkFLV3xOK6yx4TTNHTF2PbV1RsdHjBjPtn6a8fIoF/LnyJRrHkEhI8o6/0aiRtOSWem5rCxcca8zoXiQD3/2YV75p7do6Z7/I1p2PEc4EeLBZ2e3MbM3eZrXho/R7IlMmXbxqAZt3hijVppvXnmdL3Y/hVedfXpACEHU9xyqEiCZ/xFCKGhKHGUSP41aGmYcR+bx6OtoCf42qrIwm8WPPHc/2bECpw9dJNERnfEKXkpJcjBNMOLn1/7wyUk3Uu+EEIKYmSBmLr6xncvdgyvuDWD70/dy4UgPfeeukuice2lfqWBRyBb5F//647PK4Q8Ux3ht6Pi0wj6RJjPMUGmc14eP8WzbrjnFK4Qg7H0cn7GZTOkA2dLbVKSFQEUIFYmDvDbSzatvJOx5HK++fkHb2lVN5dl/8TChpgDvvnwcj88gFJvahqCYt0iNZlm3tYOPfv5hAvN0+3RxWQhEvRs7ZsquXbvkwYMHF+XaC0ExX+K7f/5jBi8Nk1gVn3WJZD5dIJfK82t/9hwbdsyuu+3F/nc5lblMkzm7kXBVx2a8nOXPNr6AT5t/isSRJQrWGSrOCLbMo2CgKiF8xkZ0dfFzlVcvj/Lmi4fpfX8IRM2v3fToIATVcpVSwcKREI762f7RrYQ3R8lULGwp0RWVJo+fhNePqbprJJeFQwhxSEo57QrMFfcGUipYvPL1Nzn2xmnC8eCM/N3tqk1yYIxAxM/zf/xROjfePhloKvLVEn957kfEzRDqLIdZAAyVxvlY6052xBan8WIxSA6m6bswR
N/FYcaHM0gp8Qe9tK9NYMVVTuspLmTHrpW71drUBB9YBuxMdLCndTWdgYUbxOBy9+KK+xJBSsnFY5d59RtvMTaYRjdUfCEfps+80XFasSoUcyWKuSKKprL9qXvZ88KDt3mWzITTmV5+0PsOrd65OdDlqkWCmpffW/vROb1+pTBSzPOdC0c5n04S0AyipnfS1E3VcUhaeSq2zZ62bp7r2oxXW/hWc5fGkS+VGUhmsCpVBOAxNDoTkZtGfS4kMxV393mywQghWHd/N2vu7WLg/CBn3n2f3rMDjPSNcn0J6PV76NjQytr7V7Nx5zq88/ClL1YtmIcFgqFo5Kv16+RcjpxLjfB3Zw6gCMEqf3jKfLymKLR4g9jSYd/gFd5PjfKHW3YT87h5+eWMlJKBZIaDZ3s5cmHgxhMbsvbUpmkqD23uYvv6dhLhxWmanA5X3BcIRVHo3Nh+I81iV22qlSpCUdANbUmVwzXMH2cZcCkzxn8/tZ+w4cWvz3wTWxUK7f4QI6Ucf3NyL3967yOEjLlNpnJZXCq2zUv7TvHe+X50VaUp7Ee9xca7UrXZe6qHt05c4qkH1vP4vWuXnPfTivGWWW6omorpNes+tckzhzLGiVQcG38d/M6XI4VqmX84e4iAbs5K2CeS8ARIl0t8/+KJurtQujSeim3z7deO8t77/bTGQiQigduEHUDXVFqiQZojAX753jl+fvDskvt7u+K+wljlq9XW23Ocx5mtFLkvcnd6nPzsyllyVWveK+5Wb5CjyQFOjA3WKTKXheIXB89ytneYtngIZQaLLk1VaI+HePvkJQ6c7V2ACGfOjMRdCPGsEOKsEOK8EOLfT/L9LiHEq0KIw0KIY0KIj9c/1PoipSSXLTE6kmV8LEelYi92SHUhqHvZElpNqjx7l0ZbOihCcE94aY0vXAiyZYt9Q1do8cy/mUoIQdTw8nLfuSW3mnO5M6lckXfP9NIau90HaCoURSERCfDLw+9TqS4dHZk25y5qk3D/CvgI0AccEEL8SEp5asJh/xvwbSnlXwshtgA/AbobEO+8kFIyOJDiyHs9nD83SKlYuZEnkxISzSEe2NXNxs1teL3L18RpZ2w9J9I9VB17VqZgSSvDfZFu/Nrdlys+lryKlBK1TpbNQd1kIJ+hP5+hMzC7fgOXxeHYxQGEEHOy7TZ1jbFMnvf7R9iyurUB0c2emWyoPgScl1JeBBBCfBN4AZgo7hK4bskYBu48OXiRyGSK/PKnxzh/bhBd1wiFvYQn1J07jqSQt/jFS0d57Zen+Mhz93LP1o4ltdE5Uzq8cR5LbOP14eO0eqMzqncfs7JEjABPNC+tGa0LxdnUMD6tfjf06++b/nzaFfdlQMW2eefUZaLBuVWqObaDoai8fuQC93S1LAndmIm4dwATk0l9wO5bjvmPwC+EEH8G+IFnJjuREOJLwJcAuroW7tH/av843/nGPqpVm5bWyUvbFEXgD5j4AyalUoUff+8QvT2jPPPsfaja8tqaEELwaGILVWnzzugpgpqXgDZ5nXbZqTBmZUl4wnyu63F8d+GqHaAnO06gDl25EzFVjUuZMXa33H1pruVGNm9hVSpEZlGGLB1JOpnl6qVhxofTgCQvgbd7eOhj97H5wfXTOoc2knqVQn4B+KqU8s+FEA8D/yCE2Cblzbt6UsovA1+GWhNTna49JSPDGb75j+9gmhqRyMxqjz0enZa2MEfeu4xQBB957r4lcSeeDYpQeLL5Pjq8cd4ZPc3VYhJFKJhKrTqnKm3KdgWPavBYYhu74hvnZBi2EnCkpFCtEDXq+0E0FJV0eenNvXW5HataZTaTdkp5izMHLlDIFtF0FX+wtnhy7CpSFbz67X28/r13eeYLe7j30c2Loh8zEfd+YNWEf3de+9pE/gB4FkBKuVcI4QGagOF6BDlXqlWbl374HpqqEAjMbkWqKIKW1jCHD/awdl0z6ze1NSjKxiGEYFOok43BDoatFKfSV64N63DwqibrAq2sD7ajK267QyO4blPgsvTRFGXGvX+lvMWJt8/g2HJSEzmf3yQY8FC2Kvzsq69TKVfZ+XTjZhXciZl8qg8AG4QQa6iJ+ueB37rlmCvA08BXhRD3AB5gpJ6BzoUTR3oZGkzT1j43zw9FEYQjPn720lH+aG0z+ixsXpcSQghaPFFaPHOzJFjpKEIQNjyUHbuuJmCWXSXudqouC3weAweJI+WUJZBSSs4evIBtO3j9Ny8Yq1KiCXGjBNEwdRKdMV751ju0djfTsa6lgT/B7UybTJZSVoE/BX4OnKZWFXNSCPGfhBCfunbYvwX+UAhxFPgG8LtykWvAHEeyf+/7M07F3Amfz6BQKNNzcVEfQlwaTHcwSq5Srus5K47NmpA7YWk54PcYbOpMkMoVpzwuO5YjnyneJuwARcemy/TclILRDA3TY3Dol8frHvN0zGiZIqX8CbXyxolf+98n/P8p4JH6hjY/RoczZDMlmlvmP1fV69E5cayPDQuUmnEciW07aJqy7HL9y5V7oi0cHb1at/NJWTNxWOU6RS4bPnTPas70Tp1wuHppZNICi+t/7w7zdtEPJ4KcO3yJzFiOUGzhfGhWbLJ1dDRbtwYSn9+gvzeJlLJhYmtZFc6/P8S7+y8wMpwBaumUNWub2fXgGlZ1xSdtg3apD9tiLQgFegujpKp5Ko4NSDShEjMCNJsRjFnsTYxbRTaE4yS8S9NUyuV2VrdEiYd8pPNFwv5JNtelZGxwHN8k5ZJ5xyah6/jU21O3yrV8/tVLw66414PUWL5uRj66rjGWzFOp2BhGfX9lUkqOH+vlVy+foFKxCQRMmltCtZ13R9Lfl+TC+SHCYS+f+vRO2tvdvHm9GS1l2TdykZQzzpXsOGHDi3rtJm7JCqlKnov5IZrNMB3eOEFt6qoaWzrkqhbPdG5ciPBd6oSqKPzmhx/gb3+6j1zRIuC9uTTWtmvFf7cu8AqOjaEobPFPIdxSUrEqdY95KlbsUtBpRMq/Aafcv/cCL/34CIGgh5bWMP7ABzk7RRFEon5a28JUbYev/8M7XLk8Wv8g7mLOpgf5yzOvcCDZw5ZICwlPEEeCrmjoioap6gQ0Dz7VZLSc5XDqIoOl1B2fCqWUXC1keLRtDevCcx+x6LI4tMaC/M5HHsSqVBlJ5XCcD6q5FUVByg9cUx0pydhVdCHYFQzjmaobXAi0BfZ/X7HiHgx6cZz6qLFdddB0dVZDkWfC2TNXee3VUzS3BKd9IggGPfgDBt/7zruMj+frGsfdytn0IP9wYS9BzUOLJ4Sp6twfb0cAxerNqyxFCHyqgVc1OJvrY7A0ftv5pJRcLWbpDsb4eNfmusVZsssUqiXXp2aB6GqO8EefeJh7upoZTuW5OpYhV7Qo2zZG0CRXKpOxqxQcm1Wmh92hCP5J0jHXkVKClEQS89//mw0rNi0TT8zO/GcqCsUybe2Ruvo1Syl58/UzhMJeNO2DN0alVCE5lMYqWEhHYnh0IokQvqAXn8+kkCtz5L3LPPn0lrrFcjcyXMrw9Uv7iRi+m2wH/LrBg82rODTST6Zs
EdCNm0rjVKHgU03O5a/i1Uwiuh+olT0OFXNsiiT4l5t24KnDNKbB4hhvjR7lUq620RsxgjzctI2toW53o73BNIX9fObx+/nozhLHL13l5OUhClaZtu5meo/3sjEcotUw0WfgQ1PIFGnqiNGyumkBIv+AFSvuzS0hdF2lUqmiz/NxqJC32PNYffOn/f3jjI3lblTzFLMl+t4fZHRgDClBUQUgao+Fp/oIRgOs2thGOOrn8Hs97Hl0A6bpjnObK+8MX0DApH4yAd1kT+tq3k+P0pdLIwT4VP2GqZgqFHSh0JMfZoO/g1S5iKYo/MbabXyoZXVdzMf6CyN888qv0IRKwoyiCEGhWuLFgbdJl7M8krg7PYAWmpDfwyPb1vDItpoNdrlU5m/+13/CayszEnYpJelkjsd+/aEFvyGv2LSMYWhsf3ANY2OFeZ2nUrFRFMGme2Y3qHrEPkUKAAAgAElEQVQ6Th7vRdNUhBCkR7IcffM0ycFxvEEvgbAPX8CLL+AhEPLhD/so5S1O7nufocsjVCtVLve4ufe5kq9avDd2mZjhv+MxuqKyJdrCntbVdAWilB2bTNkiW7bIVizKtsPVUoqSLPPJ7i38hx1P8UjbmroIu5SSl4cO4FEMokbwxpODT/PQYkZ5J3mS9BwsnV3mj+ExeO73n2B8OINVnLovQkrJ8JUkG7evYfOudQsT4ARW7ModYOeDazh6qIdisTwnC18pJaMjWZ54Zgs+f31NpcbHC5gejVyqwOl3z6ObOvod8u4Cgek10E2Ny6f6iK5KkM/f3XNO58Px8T4cR87IDjmgm2yKJNgYacKyqxSrVSQSVQiy1RKPtHTyePvausaXLGcYsdI0G7fXyKtCRUrJhdwAO2JuNc5isOGBNXzyS0/zk6+8iqKqRBLBm/bjpJTkUgWyqTwbt6/h47//JKq28N3tK1rc/QEPz33yAb77zf1ozcqs0zPJ0Rwdq2Lseqj+d13pSISEC8evoGjKHYV9Ioqi4At6GewZIZ+ZupPO5c6cTA0Q0Gd3sxYIPKqOR/0gFeZRNY6P9/NsR319Q8pOBQVxx8d4VSgUbNeQbDG556H1JDpjHHntFMffOottOzUfIVH7bLeuSfD0Fx5h/f2rF0XYYYWLO8D6TW08+/wD/Pylo4TD3hmtwB3HYWQkS6I5xKc/91BDLH/9fpPLF4fIpQr4wzN3I1TV2spt5Iqblpkr+YqFLub/gdMUlYxVf5EN6wEkEls6k3rx29IhYbqdr4tNU3uMZ37rUR751C4Ge0awSmU0TSXUFCTREVv0Te8VL+4AD+zsJhL185MfHWZwME0k7MUzSZrGcRxS4wXK5SoP7Ozm8ae24PE0ZtPynq0dvPKTIyiKQMzCO1DKWgVN39EepHx00d9AyxEHWRe7RnH9XHXGr3nYGlrDicxFmo3oTX/jXLWIX/OwJlDfPSCXueMNeFizbdX0By4wd4W4A3SvTfB7f/QEJ4/1cmDvBYaHai3+12uHhSJASjZubmfn7rV0dDbW8GnN2gSVYhlllrXzpbJNS8xPOV/CKpbx+Oq7F3A34FMNUuUCzHPxXpUOXrUxN/8nWrYzXs7SVxzGVIxaKsax8CoGn1n15KysEFzuTu6qd4jXa7Br9zp2PLiWdCpPcjRH2aoiFEE47CXWFGzYSv1WNE2lNeKhZzCLac7Ms8Z2JLYjaW8KUBrLYq+Qod7zRUpJpmAhpSToM6etWFkXaua1wTME9PlNnUqXi2yJNMZMzquafK7rKXryg5zO9FBxqnT7W9kUWn1Xzrh1mT13lbhfR1EE0ViAaJ1MfLKVDMnyKKPWELlqDhCE9BBxI0HCbManTV5yt2ZVjHSmRKZUIeDRpxR423EoWFXWtYUJenVKUqIv0I1oqSKl5OTlIV47foHhVA4hIOj18OF717Jzfecdm862x7p49eqZab27p7t22bHZ3VTfSpmJaIrK+mAH64MdDbuGy8rlrhT3ejFiDXE09R59xSsgQRUa2rXH5SuFHiQ1X4pu31rui+wgatyc6tn4QDdXLw7h83sYGi+gKOAxtJsEx7YdStdW6Bs6IrREfWTH87SvbcG4y5uY9p6+zEsHThPxe2mN1jqSi1aFH+49yeB4lucfumfSG2bM9LMx3MLlXJK4ObcbfL5q0ewNssrv+rXPhLJVwSpY+ILeRaseudtwxX0OVJ0qR1IHOZE+iqGYxPSmO666HenQW7xMT+EiOyIPsTV8H8q1Coh7HlrPmz88wNrWIG1xH1fHCgynCh8YlIla2VtXc5BExIvnWrlkMVfiwd9+fCF+1CVLOl/i5++doyUaRJ/g6+E1ddr1EO+evcIDa9tZlZi8quSxlo2cSb9BxbHRZ1DvPhFbOoyXC3y+Y+G7DpcbdtVm70+OcOiVk9hVG6/fw2O/tpOtH9rg/u4ajCvus6TiVHht+GX6ileIGnHUaUrqFKEQ1qNUnSoHx/eRqozzSNOHUYRCMOrnngfXc+bgBRIdMTZ0GHS3hLAqNo6UqIrAY2ioE9ILuVSeQMRH95a7+1H95JVBpJQ3Cft1FEWgayqHL/TfUdzXBJr4ROe9vNh7jFZveMYCb0uHq8UUj7Vs4L5o57x+hruBt198j30/O0qiI4qma5QKFj/56huYPpMN969e7PBWNCvWfqARSCl5J/k6/cU+4kZiWmGfiKZoxI0E53NnOTT+7o2vP/HZ3UQSQZKDKQB0TSHg1Qn5DPwe/SZhz2cKlAoWL/zxRxbcPnSpMZYtTCrs1/EYOqOZqd0z9yTW81zHvQyWMmQqxWldF/NVi4FCij3N6/lYxzZ35TkNVrHMe6+eItERu/F+9fhMgjE/+392dJGjW/m44j4LevIXuZg7T8yIz+mDLYQgZjRxIn2EoVLN6c8X9PLZf/MJ4q0RBntGyI7nbxOZUsFiqHeUSrnK5/7n52lb01yXn2c5E/F7r01LmhyrXCUy2TSdCQgheLx1I7+77mGCuoeBUprhUoaKU62NTZOSqmMzauUYKKZQhcLn1jzIJzrum7S5yOVmirlSzebhlnJfr99k7NpixqVx3N3Lv1lgyyr7x94mqIXntWJThIJP9bM3+RYvtH8GIQTBqJ/P/7tPcvH4FQ784hiDPSM3Kj0k4A95efKzH2LzrnX4w/Mb+L1S2NLVwi8On8O2ndvGD0opsapVdqyfWepqU7iNjaFWBoop3h3t4WSqn5Jd83M3FY0NwWY+1LyO1f7Yjf0Sl+nxh33ohkbZqty0+Z9LF2jrTixiZHcHrrjPkP5iHyW7NOfqiol4VR9j5VFGrGGaPS0A6IbGpp1r2bhjDWNDafLpAtKRmD6DRKc7P/VWYkEfj29by6tHzxMP+fEaNfEoV6qMpPPcv7adrsTMRxIKIejwRfl0V5RPd22/NvBYumI+D3RD45Hnt/PLb+4l3BTE4zfJpfKU8mX2PL9jscNb8bjiPkMuZM9hKLN3lpwMIQSKULlSuHhD3Cd+L94aId7qeodMx9P3ryfkM3n92EUGx7OAxNA0ntm+gUe3rpnXcBUhZmcL4TI525/Ygukz2ffTI4z2j9O
xtplH/nAnHetapn+xy7xwxX0GSCkZsgbxqjM3+JoOr+pl8Fre3WVuCCF4aGMXO9Z1kszkcaQkHvRh3OWbzUsJIQRbd69n6+71ix3KXYf7KZgBFVnBckr4tfp0tAIYikmqcvscTpfZo6kKLdHgYofh4rKkcBOKM8CRDnWxEZyAQFw7r4uLi0v9ccV9BmhCA2Rdp8/b0kavUw7fxcXF5VZccZ8BmqIR0sOUnalnJs4GyymRMN1yMBcXl8YwI3EXQjwrhDgrhDgvhPj3dzjmc0KIU0KIk0KIr9c3zMWn3bOKoj11x+NMkY5DcnyMFsMduODi4tIYphV3IYQK/BXwHLAF+IIQYsstx2wA/gPwiJRyK/BvGhDrorIusAFb2nVJzaRG0lw+1YcxPLkVsIuLi8t8mcnK/SHgvJTyopSyDHwTeOGWY/4Q+Csp5TiAlHK4vmEuPnGjiRZPG7lqZv4nC9s8vPVh1q9rnBe4i4vL3c1MSiE7gN4J/+4Ddt9yzEYAIcTb1IaX/Ucp5c9uPZEQ4kvAlwC6urrmEu+iIYTg4fhj/PPAd6k4FXRlbl7qll1CU3U+tuVZ19fapS7YtsNg3xgjg2n6Lo0yNpKhWq3ZMkTiATq7m0i0hmnvit/m8+KycqlXnbsGbACeADqBN4QQ90opb3IHklJ+GfgywK5du+o/WbjBRIwoe+KP8dboa0T0KNosBb7sWOSqWZ5pee6O05lWImXb5sz4MG9cvUh/Po0jJc3eAI+1reXeeCte7e4eOjJX8rkSZ472cuDNs+SzJaQEj9fA8GgIIahWHfoujXD+VD8Aptdg5571bN3RTSjiehStdGYi7v3AxNHende+NpE+YL+UsgJcEkKcoyb2B+oS5RJiQ3AztrTZl3wLj+qdUWOTlJJcNUtFVnii+SN0+pbXU8t8GCxk+crpdxmzigQ0gyaPH4EgXy3z7QtHefHyKX5n0y7WheOLHeqyQUrJ2eO9vPzDw1ilCuGYn+b2yX10vD6DULS2kChbFfa+cpr9r53lyU/cx7Zda1zPohWMmG6DUAihAeeAp6mJ+gHgt6SUJycc8yzwBSnl7wghmoDDwANSyuSdzrtr1y558ODBOvwIi8OINcSbI6+SqabxKj68qu82t0gpJXk7h2WXaDITPNL0xG2j9lYyo6U8f3n8LaQDMc/kK8VcxSJbsfiTbXvoCs7c6GuuVBybC+kxLmfHuZAeY7SYx5YOpqrR6Q+xJhRjTThGhz+0JP3arVKFn3//EGeP9RJpCuDxzr5XomxVGRvO0LW+med/czf+oDtwezkhhDgkpdw17XEzqf4QQnwc+Atq+fSvSCn/ixDiPwEHpZQ/ErVPwZ8DzwI28F+klN+c6pzLXdyhNm6vt3CZk5ljJK0RhFD4YEYeSCStnna2hu6lzds5q+EeK4G/P3OQM6lhWrxTWwOkrCIB3eTfPvB4wwS1UCnzzuAV3ui/RL5aRhGCgG5gqtqNbuFCtUKxWrP6bfeHeGbVerbFW+c8RLvelIplfvC1t7l6ZYymtvlZT0spGRvOEo76+MzvP04wXD/fJJfGUldxbwQrQdzzxTKZfAnHcajKCo5RRDMkQgg8qpewHl7yXajDIxm+86NDKELwmRd2kojXx6Nl3Crwfxx6hTZfaFpxlFLSV8jwZ/fuoTtY/yeb91OjfOPcEdJliyaPH482dTZSSkmmbJEul9gSa+Yz6+4l6llc8bOrNt//2tv0XhqlqaV+TxVjIxkisQCf/9ITmB5372M5MFNxd43DZoHjSPqGUxw508fF/iSZfAlFuZ6zlEhZG9Dc3Rbjgc0dRDuW/q9336FLFAoWjoSDh3t47pl763Le8+nkNT/06UVICIEuBCeSg3UVdyklr/Rd4KWeM0RML52B8IxeJ4QgbHoIGSYX0mP8+eE3+B+2PkR3qPFpoztxeO8FLr8/THNHpK5PN7FEiOGBFO/88hRPPn9/3c7rsvgsffVZAkgpOd87yi/2nSGZyqNrKkGfSWs8eNsHrVyxudA/yslLgwR9Jk8/tJF717fPy1u8kbS1hDl2sg8hoLV5ZuI3E3IVa1Z+6LqikSlbdbs+wCt9F3ix5zTt/hDaDAdgT0QIQYsvQKZs8Tcn9vMn936IVcGF99lPDmd4/efHibXc/n6rB/GWEAffPseGbR10djfV/fwui4Mr7tNQKJV5ed9ZDp/tI+z30NYUmvJ4Q1eJ6bXNw6JV4QevHOPkhat84tGthINLL6+564HVJOIBEILVnfVbNXtUbcpuXls6KIgbYmVLB49av7fjufERXuw5M2dhn0jIMAHJV04f5H/Z/jh+fWFTbXt/dQpNU9Ab5FOvqgq+gIfXf3KM3/ofn1ySG8kus8etg5qCdK7I3/1oP8ffH6AtHiLgM2f1eq+p054I0XN1jL/9wV6GktkGRTp3hBB0dzXRvWpuQ7/vxKpAFIS4TeCzRYvD5/t549hF3jrZw+XhcRwpKTs2m6L1MVIrVMp84/2jRE3vvIX9OiHDQ65c5sWeM3V1B52ObLrI2RP9RGL1myUwGcGwl6t9Y4xcTTf0Oi4LhyvudyBXsPiHlw6QyZdoiQfnnFYRQpCIBADJ1158l9FUrr6BLlE6/CE6/WHS5dKNrxXLFQ6f7ydXsgh4TQxN4cLAKOcGRvBrBhsj9RH3twcvkylbBI3Z3Yyno9Uf5N2hK/Tn62BBMUPOHq81hysNrkcXQqBpKife62nodVwWDlfcJ0FKyY9fP0EqW6IpXJ9O0nDAi5SS7/zyCJWqXZdzLmWEEDy/+h6yFYuSXQVgcCyLIyVeQ0cAqqLg9eicHxvlIx0b0euwyi7bNq/3XyLhrX8HsCIEmlDZN3il7ue+E5fODeL11/cmdSf8IQ+Xzg4uyLVcGo8r7pNw7P0Bzl4ZpjlaX4GIhnwMj+V4++jFup53qbI+0sRvb9rJWCnPUCFLplRCu1Zd5EhJzimTp0K3E+WeQH1W7efTSUrVCmYd8/cTafL6OTjcR+laPXwjkVJytXccr39hcvymR2c8mccqNf5nc2k87obqLVjlKj975wzx8O0dp/WgORrgzfcu8sDGTiJLcIO13tzf1E6zN8Deocu8mDpNyi7ilwZSQpsWpE2G8BgaoUB9uiR7MuMoonFrFk1RsB3JUCHH6gaXRuazJaqVKtoCGcwJIVAUQWosR8sd7AwazeholjOnBxhL5tB1le41Cdata8EwXamaLe5v7BbOXR7GKleIhRojvJqqIAQcPdfPh3feHRPh2/whfn3tvTzRso6//tk7jOeKNPn8yDIULIuPPrIVXa2PgF1MJxtezSKRDBayDRf3Stmm3rN7p0MIqFYWPm2Yz1v89KUjXDw/jKoq6IaKIyXHj/ViGBpPP7ONbfd1upU8s8AV91t459ilWVfFzJZo0Mv+E5d55P6acdOIleZE6jKj5TSWXcGjGqzyJdga7iKorxz3vpjfx//08cd490wvZ3qHCMe8fOieLta116+2eqSUx6c1VtxVRWGkWJ+pXFMhBAut7bXrLvBFi4Uy3/r6XsbH8zS33t59W7aqvPTiYaq2zf
Yd3Qsa22wp2RWSVpaKU0UTKmHDR1BfnCd0V9wnUCiVGRrL0hqrTwv+nTB0jbFMkSODPZyq9NBXGEURAo9qoAiBU3Y4nxvg1eGjbAl1sadpCwlP/RqMFpOA1+Sp7et5antjnlpsKWl0v5iCwF6AckjDoyOd2mD2hVqxSikxPAsrC/v2nmd0NEtL6+TvccPUaEoE+NXLJ1i7rplweOkteIZLad4b6+FQ8iKOdG58XQL3hDt4ML6OLn+8oSnDW3HFfQKj4/mbGmsahUQy4h3hm319dEZjtHhubykP6X4c6XA228+ZTB+f63qMNYHWhsa1lHCk5HIqxYVkkqxVpuo4BA2DzkiYjU1NGHdI4xiKii1lQ9/YtpSYdUojTYXXZ+ALmFTK9oLknB3HQSAaXlM/EcuqcPi9HmLxqa+p6xpSwqkT/Tz8yIYFim56bOnw2uBJ3ho+gyIU4mbgpt4KRzqcz17lZLqXjcE2fr3rITzqwmyQu+I+gfFsAWcBVmSD6ghDwSHWWi2E9DtX5ChCockMUahafOvKG3yx+yk6fSu7PbxYqXBiaIhXL15iJF+72eqqggCqjkPVqZVSPt69mh3tHcR8Nz/ytvuDXMmlG1YtA7UPbKuvsU93UNvg7Fgd58qFkQUR91KxQlNbaEGnNQ30j2NXHfQZXDMU9nL8WO+SEXcpJT/rP8L+5HnavBHUSVblNcEP1ixMskP806W3+OKaxzDVxpu0uaWQE6hU6zMAeyqKosQlvRev7WHC09uU+DQTn2by/b63qTort0b+ajbL//PWO3zn+AkqVZvOUIj2UJCE30+T309rMEhnOERA13n5/AX+rzff5NjgzXXZ68LxG7a9jUK55jmzEKzb3LZgpYn5dJGN2zoX5FrXKZftGe8raJqCZS2dMs33xi6xf/Q87XcQ9okIIWjxhOgvjPHTgSMLEp8r7hMQovEpmSF1FBAoUpnVtQKal1y1xKX8ymwy6U9n+Ku9+6jYVTpCIQLmnR9dTU2jPRgkbJr8/XuHebev78b31oZiOFI27CZdqlbwqDoJ78KI+/otHWi62vAKFsdxavnh+xd2SphpTu1BNJFKxcbrXRq2xLZ0eGP4NHEzMOM8uhCCZk+Io+OXSZcLDY7QFfeb8Jr6tRKFxmBjM6iN4JEmjpSYxuwetb2qwf7k2QZFt3iMF4v894MHMVSVqHfmlQVeXafF7+fbx05wdmQEgK5ghBZvgFyl3JBYx0pFPtyx5kYzVqMxPToP7F7L+GhjfYlSo3k2bG1f8Nmq7R1RdF2jUqlOe2wmXeK++1cvQFTT05MbIVMp4p1lZZYiFASCY+ON73J2xX0C8Yi/oZUWGSWPIxzUa7/2wCxHpIU0H735EXLVYiPCWzReu3iJYqVC2DP7RiZT04h4TH5w8hTOtaqSp1etJ2UV6756t+wqihDsbO6o63mn48HHN+HxGhTz9bVEvk7ZqiCl5PGP1cfLfzYYhsbOXWtIJqcuLS1bVVRVsGXrwv7u78TR8R4MZW77IBHDx4Hk+TpHdDuuuE8gHvIBAtuZYTJ8llRFFWStWkZK8HlmJ+7X00Yle+nkHedLoVzh3b4+Er65Wz0ETZNksUjP+DgADzS1sT4SZ7SOtehS1rpSP7lmMxFzYeuWfX6Tj/36TtJjeWy7vu9NKSXJ4SxPPn8fkWkqVhrF7ofX0d4WYXgwjePcfkMuFsskkzme/fj9BJbIvNfxcn7Om/amopGpFm8qmWwErrhPQNNUtq5rZTzTuJWxpDbQI+gz8cwyLQPX954WZzRiIzg2OEjVcdDm6XpoqipvX74M1JqMPrv+PiS1oSH1YKiQY0MkzsOti5MWWLu5jQcf38TwQAqnTgIvpWR4IMXW7V3cu2tNXc45F0xT57Of/xD3bOlgdDjL8GCasWSO0dEsg1fTVCsOn/6NB9m6wJu9U+HI+TV7CUTDP8VuKeQt7NqyimPnBhrSOKJJFSFq4r6+s2nW+X0pJQ4SU1kam0r1YF9vL2Fz/h3BcZ+PE0PDFMoVfIZOwuvnD7Y+yJdP7EdK5mz/K6VkuJCjyevnX27agbpAufZbEULw+LPbsG2HQ2+do6k1jD7LxYEjbHLeNHkzQ9ZIky1nCW8MkNpg8+pIkTZPK6v9q/Br9XfUnA6PR+f5F3bw2BObef/cIKmxPLqusWp1nNWrm1C1pbUODeom4+UsMPv3VVU6mIo2bYXNfHHF/RY6myOsao0wOp4jGqrv5lLA8VOtOJi6TmwOVsK5aokWT5SAtnQNx8qVKucvj3DkdD9Fq0xHc4QHtnTSeocJVuPFIkFj/k0diqitowqVMj6jdvNbH47zx9s+xN+dPsjVfIYWX3BGM11v/Cx2laFCjnXhOL+9eUfd/eFni6IoPPX8/URifl7/6TF0QyMc80+7CHGEzWhokNHwAI5iUyk5yBJ0rWqhY3UCB4feQh8Xchd5a1SwLrCGXbEdhPWpp441gnDYx64H1y74dWfL1vAqzqSvEpnDW3fcyrMj3vgnJVfcb0EIwScf38bffPdtKlUbvY6OfKqj4MsFad6oo84hDZGvlvhI6/Yla56UL1h8/cWDDI5k8HkNdE3lyOk+Dh6/zNN7NvHw9ts/tOWqjWLW6ecRUL4lZbE2HOPf7fgw/3zxJEdGB9AVjSaPb8oVuGVXSZbyKCh8et1W9rSuXrQV+60IIdj5yAa6N7bw8+8dYuBKEt3QiMT8kw70KBg5+hLnsbQiMqsiywp+v5cN29sJTDDHM691TTrS4VL+MpdyPexJfIh7gpuW7PttMdkYasNUNcpOdVYbq1JKbGmzI+aK+6KQiAb4yIc28dO3T9HeFEKpwwdbSsngWJYntmzjkvcSVcee1Qi4ol3Go+qsD7bPO5ZG8eJrJxgZy9E2YdC2z2tQtW1efvssbYkw3Z3xm17j0TQcKanLLVSKSW0JQobJFzdt57H2Nbxz9TJHRgeQ1P4muqIihMCWDraUCGrzXz/atZFdiU6inqX5lBRPhPjCHz3B1StjHNl/gTPH+0DWbBs0VUEogpw/xXDiEkpRRa0YROMB2lbFCEV8d5wspgiFmBGh4lR4ffgtxsrj7InvXlBPlOWAqersblrPG0OnafPebh9yJ5JWjq5AgpYF8Ipyxf0O7N62mvFMkf3He2htCs5r5Sal5Goyy+buFn59933sHffxxshxWj3RGX1oLLtCqpzjN7sen3P5VaMZS+V5v2eElqbb2/I1VcXr0dl3tOc2cY/7fYwVCoTn6dVSvVbhFDAm348QQtAditIdivLC2i0MFrIM5rOMlPLYjoOparQHQjR7A7T4AnWZCtVohBC0r47TvjrO05/azthIluRwhvHRHEknyXH/RdaanYT9AXx+c1a2ArqikzDjnEidRBc6u+O7GviTLE8eTWymJzdCf3GMZvN2N8tbSZUL6IrGp1c9uCBPQ0tTKZYAQgg+9vBmTF3ljcMXCPs9c7ICLloVxtIF7t/Uzicf24amqTya2ILlVNifPENED+DTJj+vlJJstUChWuaFzoeX9Kp9eKw2G/ZOb9pQ0MOlvtHbvv7o6tX80
9Gjc6pxn0iyUGRnRzseffrNZr9usC4cZ104Pu2xywXTo9O2Kkbbqhhlp8x3en9At2zDr81936jmixLjcOooXb5O2rx3j3HdTDBUjS90P8J3r+zj/cwgYcNLQPPc9hko2WXGrAIRw8cX1zxKxFiYDWtX3KdAUQRPPfT/t3fnwXGe92HHv7/3ffe+cR8EAR4QRYqnSFGHbR3RYUm1pbi2WjkTx07Vuk7q/JOZTtNmxpNxptOmM03aTtxp1CZ1nMvXxK4SSeNLsiVLlixKpCkeIs2bAEgCBBaLvXff9336x4ISDwC7IHaxOJ4PhzOL3XfffR7s4rfP+xy/5xbWr2njuy8f5MLlKeKRQGUlaxXFkk0yncfrMXnq4Z1sWd/1/ptuiMFDnTvp8MV4bewwFwtJvIZF0PQhCC4u2XIBG4feQBuf7NtBX7A+29A1SrWGiOuqGVd1bu5or/RdOs6smR6vcFyXQtnGmc5e6LFMfNNjImXX4a6+vpsu/0ryTvIAWTtLm29hX16uUowVkyjl8uPRV3mq7xNYS/TKsVkClpdPD3yIo6lhfjp2jAv5SaDSyKnMuIOQ5eejPdvZnugnNEtDrhH0O1WDgZ4WvvCpD3HwxAiv/+I0F8anEAS/18Lvsyo52FUloOeLZUDh93m4f/dGdt26ZsYWv4iwI7GebfEBzuXG2D9xkrFiipKy8RseBlt62RlfT7svtiwGtNZ0VvodHcedcQ2j+bcAACAASURBVLB4MpVjx+YZ5ikr2NXVxStnz9IXi2KZ5jWzh4tlm4upNJdSGTKF0vSXiMD06lPTNPB5LNZ3ttAdac4inKWk4BQ5lDpCwhtf8LnSdpYT6XN4DQ+WGAznR+gPLW7umeXAMky2JdayNd7HSD7JaCFF3injMyzi3hD9obZ5ja/VrVy1HCQijwL/HTCB/6OU+s+zHPdJ4NvAHUqpfXUr5RLg93nYe1s/uzf3MXRpkguXpzh7YYLRiQxl28UwDbrbogz0tNDVFqW/K1HT3peGGAyEOhkIdS5CLRonFPSxd3s/P9t/iq722DUDdrl8CQXcsa0fx3U5NTrBu+cucmYsyXg2h+06nEyN897wKBGPl1gwQDzkZypXZHSq0t3jsyzCPu+Nl7x2mYlsjrZUkP/yg1d5Ytut3NbTuSy+EBvhTPYsjnIxZeHBJGQG6PK3EbIC+E0/76YO6+A+BxGhN9hCb7Cl2UUBQKrl3xAREzgOPAwMAW8Bn1ZKHbnuuAjwPOAFvlgtuO/Zs0ft27ei4n9D2M446dy3KNvnscweoqGnsMyOZhdrRrbj8v2fHuWdw+eBSreW4yoCPg+/+vB2JkoFXjpyknS+iNcyCfm8+D0WIkLOLvPz0WEKto1TdhjPVHLDtIQCJMKBGRd8FB2bkuOws7ubtlCIbKlEMptnx5puntyxmVAd5s8vN9+/+CMu5C8S8dT3KkYpxUQpyW+u+4zummkyEXlbKVV1hLuWd2kvcEIpdWr6xF8HngSOXHfcHwJ/BPzbeZZVm4Wr8iTT/xNXpTElTtk5x0T6T2mL/jsMY/FXEVZjmQaP33cbd+4Y4MTZMQrFMu0tYRItIf5h/1FOjyVpDQfpSdy4OCZoebizo5dXz5/lYjqHZQpe0yJdKJItlmmPhgh4PYCi5DgUbQePabCnt5f4dCbJkNdLwOPh8MglRtMZ/sU9u4kucKB2OVFKcbFwCb9Z/37dK1dCk+XUgvvytcVRy/y+XuD8VT8PTd/3PhG5HehTSj0/14lE5PMisk9E9o1Np2jVZmfbw7juBJbRgYgXy2jHddOUnfPVn9xErfEQd+4Y4L69g/ijPv7spTe5mErTm4hOB+iZZQpFsGFNJELI46XsuiiBsnI4NzHJhdQU6WIJj2GytbOTD/f3vx/YrzBE6IpFGM/k+OrP3qFoV08lu1I4yqHgFLCkcS3rrN34PORafSz4UyAiBvDHwOeqHauUehZ4FirdMgt97ZXPQN2QXkixXPK9nRlL8n9/vI9IwEdojs03AIq2zaHRUYIeC49pEvH6KLtupZtGubhKUSzb9IYibO7uqNqn3hENMzI5xQ/eO8HHtt5az2otWe70Z6WR4w03fh61paqWKDEMXD3HbM30fVdEgK3Aj0XkDHAX8JyI6FUPC+Sx+vCYayk7wzjuFLYzgmX24LGWxoYFc5nKF/jrn75D2O+tGtgBjo1dxlUKz1XTIT2GQcTrJe7z0+IP0BkKc2EiTTJbW9bOzmiYn/7yDGcnJm+6HsuJJWYl22ADt4r0NPCqQKuvWoL7W8CgiKwTES/wNPDclQeVUimlVJtSakApNQC8ATyx0mbLNIOIh0TkCwR992KabQR899AS+W0MaW4Cq2qUUjy//z1KjkvYX72smWKJS9kMoSoLkAxD8HssjgxdouxU33bONAz8Xg8vHztVc9mXM0MMEt4ERbcxm3oARJuQTEy7OVW/hpVStoh8EfgelamQf6GUOiwiXwb2KaWem/sM2kIYRpBo6BPNLsa8XJnqONPA6UxG0lOVrI41dCd4LZN0oci5y5Ns6Kw+sJcIBjh2aYzxbI7W0OJuIdcMPYEujk4dw2/WdyC57Np4DA/hJqQD1m5OTddYSqkXgBeuu+9Lsxx7/8KLpS1nrx8/i9/rqSlYK6UYmUoTsGq/3A96vQyNpxhoS1TNrnklxe+J0XFa16384L4+vI53U9dPZFu4tJ1mW2zrql0/sBwtj5E5bdlIZvMcGxkjEaotm2LBtrFdd16J2UyjshXi5XRt2+j5PBanxydqPv9y1unrIO6JkavjrBZXubjKZVNkY93OqTWeDu5aXQ1NpABq3hQjV66ka5gv0zS4nKktgIW83lUzqCoi3NN6Jxk7W7c9OidKk2yObiJeh5QG2uLRQ99aXQ2Np+bVCq9sRj7/S32vaZLK1TZrxjINUvnSvF9juVoT7GVz9FaOpY/T6m1ZUFdKxs4StALc0XLt5LepZJYjb5/hvQNnKRXLtLRF2fmhQQY2dc8rtbDWODq4a3V1fiJVU9bMD9xc4DENg0yhOGuismuom32V5UlEuLttL5PlSUYLY7R4EzcV4LN2Dtu1eaL3n1yz6vXIO2f43jfeqOxNGwtgWQajIxP8v6++SltXjE88cx/RhB54bTbdLaPVVbFcxpxHIPHcxHaDMJ1iWCppaaspOQ6RGqZkriRew8ujXQ/TE+hhrDhO2S3X/FylFOPFCRSKj/c+fk26gTPHLvD8X79OrCVER0+cQMiH1+chmgjRuSZBaiLD3//vH1Mq1v56WmPo4K7VlcyzjRycntveyIU3uVKJ/pbV11/sM3081v0wH267m7Sd4XJxgpI7e/eUoxySpUkul8bZEFnPU33/lHZf2/uPK6V45fkDROIBvLNcnbV0RBkfneLEoaG610ebH90to9VVNOgnlS9Q66RDr1nZcMN23WtWp1bjKoUgNfXvl2yHda2Jms+9khhisDW+hXXhfn6ZPsnB1CGmyhmE6aseqfwer/wbjGxgc/RW2n1tN3TljI1MMjaSpKN37t9lKOrn7VeOsWV34zeB1mang7tWV+va
ExwfGSMerG0qpIiwJhrl5ERyXsHddlzCfu+sGz1f4bguIsJgR9ucx610ISvEzsR2tse3krbTJEspCk4BhcJjeIh7YsQ8UTzG7OMl6VQOwzSq9t8HQz7GL6XqXQVtnnRwX+Vsx+Xs0DjvnbrE+ZEkqXQeAWLRAGt7W7h1fSdre1qqD1pO645H5j162RWJcHIiiatUzVMoy45DZ6x6zvLxTI6da7pXXZ/7bAwxiHlixDyxeT/XNA1q6T1zXYVp6R7fZtPBfZVSSvHL06O8+JPDpLOVzTOCAS+JaKXFXSzbHDw6xNsHzxGPBXj8ga2s76ve+l3bFsdjmZRsB28NO1EBBDwe+mIxzqcmifiqB2GlwHEV7dG5g3vJdnCV4v5bdPdAPXT0JjBEcGx3zuA9lcyyaYfesanZ9NfrKmTbDs+/fIhv/OPbGCJ0t0dpTYQI+D2YpoFpGgT9XtoSYbo7ojiOy9985+d879WjOM7cC2O8lsXdG9cykZ3fCskNLS34LYtCDfnXy45DyOclFpj9i0ApxWg6w2O33UKH3lu1LoJhP5t3DzAxOjXrMY7tUirabL97cBFLps1EB/dVxnFcnvvRu+w/fJ6ujijBQPV0vOGgj672CG/uP82LPzmM6859bX7Hhj4EmddGGZZpsK2rC9t1Kc2R8VEpKJRt1ne0TM+HnOkYxcjkFLd1d3D3et2CrKcPP7aDaEuIsZFJXPfaL/pioczoSJI7H7qNrr6lsY/oaqaD+yrzzqFzHDo2Qnd7tOb+bQDDMOhuj/LOoXO8e2wYpUqoWZa3J0IBHtt5C2NTmXlNcYz5/dze00PZccmV7RmfmyuVaI+E6IjOvEim7DgMTU6xubuDp/dsn9dqWa26UMTP07/9EBu39TE2kuLS0ETl/3CSYr7EQ5+8gw8/ul0nGFsCdJ/7KjI5leOHr79He0to3n98ClCkCYeH+M733yYUSRIKKDxmL0H/r+D33YYhH6SZ3buhj6PDY5wem6ArFqn5dRKBAHvXrOHw6CWmikWCHg/WdIAulm1MMdjU035Dq10pxUQuT7Fs8+iWQT6ycWBes2+02oWiAT7+mQ+R/vhOhk9fxrYdQhE/fRs6deqBJUQH91XkF0eHQIHXM7+3XWFTKB3Cdi5iGCaOE+D02V523qZwVJpU9q9I54IkIs/g9WwAKukBnr5nB1/9yT4uTKbpjIZr/kIJ+7zcsWYNQ6kUp5NJcuVyJc2AGNyxfg2+6fLbrkuuVCJXLOMoxWBHK49uuYXeuN5QYjFE4iFu3aXTDCxVOrivEq6r2HfwHPFobfPPr1C4FIoHsN1xDIkiIkQjcOAw7NoqmBLFNKI4bprxqa/QGv037wf4oNfD5+7bwzd/9guOXbhMeyT0fmCuxhBhbTxOVzjMycsT5B2bvrYYmXKJbKqyytIyDPoScTasa2FbbxftYR1oNO0KHdxXiVQ6T6Fkzzu4l+wz2O7l9wM7gM8LY+OQy8OVtUqmUel6Sab/nPbEl97vogl6PfzGR3az79QQzx94DzenSAQDVYO847pMZHKUHJdHbhvk4W2DBLweSo5D2XEwRPBZ1rzGDTRtNdHBfZW4sjhpPhQu5fIZDLm2j14EDAOmMh8Ed6gE+LI9RKF4mKB/9/v3G4awd2Mfm3raeef0MK8dP/v+VEmfZeGxTIRKN0uhVEYhiMDO/h7u3NjHmpYPFtx4TROv7kvXtKp0cF8lqs1Pn/k5l1GUMGTm1r47wykNI0qu8MNrgvsVsaCfB27bwIc3DTCcnOLSZJozl5Ok80VcBX6vRX9rnO5ElN5ElJC/+jRNbX7yTo6SWyRgBvEaetXuSqaD+yrh9c7/rXZUmrlmy3pmSENiSISyM4RSDiIzt7A9lslAe4KB9gR3Dup56IthsjTBgcmfMZw/h4EBIqwPbWJHfC9+c35ddbXKlscYzu3jcvE4gtDu30xP8HaCVvWNzbWF08F9lWiJh1BKoZSqfRqkcmZM4euqyv/4DDMcZTrRusJG0N0nS0GqPMH3L34HhSLhqWR7dJTDicxRxooXebjzSXymv/qJ5mG8cJLDk99CEHxmDBSM5N7mQm4/21ueJubVX+qNpld4rBJBv4fWRIjcPLabE/GhuLHvJZuD7o6ZW+5K2SAmgu5SWSoOJH+Oi0vUE3//i90UkxZvG5OlcU5nj9X19Wy3yNHUd/AaEYJWO6Z4MQ0vIasDywhwZPLvcVXtq5e1m6OD+yohIty1az1TmULNz7HMVkDdsFI0l4ddt838HNu9TNB3h16huEQUnDzD+TNErJmzQIatKO+l363ra44XT+CoIh7jxu4erxGi5OZIls7U9TW1G+ngvops3tBJNBIgky3WdLwhYUyjBaU++EKYykBLHAb6bjxeKQWqTND3oXoVWVugslsCBENm/lP3GB6KTu1f+LXI2xNzdskppSjYOt97o+ngvor4fB6efHg7U5kC5fLsybmueY5nEEUZpWxKZSgU4KP3wvXT1JVS2M4wPu8OLHNNA0qv3Qy/GcAQA2eWbpCCkyfuqW+SL48RnLE77woBPEZ9+/i1G+ngvsr097by+AO3MTqRplDDJsamkcDv3U4un2ciWeSR+xSd7dce46oitnMer7WReOjTuktmCfEYXgbDW0iVkzc8ppQi52S5Nbqjrq/Z6h9ElMzYr+6oMoZYJHw6x36j1RTcReRRETkmIidE5PdmePx3ReSIiBwUkR+JSH/9i6rVy+5t/Tz1+G5y+RKj42nsOebA27bDRDKMKdv5xGMhNg4MU7aHsZ2LlO0RyvZ5lJshHHiMlujnMWboZ9Waa2tsN63eDi4XRyk4eRxlk7UzjJdG2RjeTF+wvoHWb0YZiNxH1r5Eyc0ClS+SkpMhZ4+yIfowHqPWXXa1myXVUrJKZbLyceBhYAh4C/i0UurIVcc8ALyplMqJyG8B9yul/vlc592zZ4/at2/fQsuvLcBUpsBr+05y4MgQrutiGIJnevekUtlBKYVpGty+dS0f2r2eUNBH2b5AsfwerjuFiBeP1YvPcysienbMUlZ2S5zJ/pJj6UPknSwxTwubo9vpDQzM2h+/EEopRguHOZt5lbw9CaIIWe30hz5Ce+DWur/eaiIibyul9lQ9robgfjfwB0qpj07//O8BlFL/aZbjdwF/qpSac1RNB/elI5cvMXxpkotjUyRTWUBojYfoao/S2xXH75t902RNm4tSLkU3gyB4jdozg2qzqzW417KIqRc4f9XPQ8Cdcxz/DPBiDefVlohgwMvgQAeDAx3NLoq2wogY+E2dgrkZ6rpCVUR+HdgD3DfL458HPg+wdq1eoaZpK51SioMXL2K7Lrt6enQWz0VUS3AfBq6e1bxm+r5riMhDwO8D9ymlZpxIrZR6FngWKt0y8y6tpmnLyrlUiq8dOIDrKsI+H5va2ppdpFWjluD+FjAoIuuoBPWngV+7+oDpfvY/Ax5VSo3WvZTaqpUspTg89UtOZc4BsD60lttigyS8M6+41JaWqM9HxOvDdl3ifj23fTFVHVAFEJHHgf8GmMBfKKX+o4h8GdinlHpORH4IbAM
uTD/lnFLqibnOqQdUtWpOZ87z4sWfoFBEzMouSxknh0LxaNd9bAjXr2vPth2+++IB+noS3Ll7fd3Oq0GuXMZVirBXz6iqh3oOqKKUegF44br7vnTV7YfmXUJNm0O6nOXFi68QMgP4zQ/yjvtML0WnxPcuvsJn+n+ViCdcl9ezHZez58cxTb2ur96CM2WYW+ZcVcJ20yhVRsSDZUQwlth0YJ3yV1uSjqVP4SrnmsB+hc/0knayHEufYk/L9rq8nt/n4YvPPIBp6TTF2uxKzjipwj6S+ddRVFZ4KxSCRcJ/FzH/HfispTHrTAd3bUkayl8gOEeO8YDh51zuQt2CO1Ry72jaTJSyuZR9nsn864CB12y9pqXuqjLJwmtM5F8h6rudrsgnmt6S18FdW5IMMXCZfTxI4TZkZaWmXU8pm+H035IuHsJv9iAzfO4M8eAzu1BKMVV8B9tNsSb2uaYGeP3XoS1Jg+EBCs7sqYnzTpHB8MDiFUhbtUazL0wH9t4ZA/vVRASf2UOufJKL6b9fpBLOTLfcVwHbcRmZnGJ0KsNoOourFFG/j+5YhJ5ElJBvaQ0EQWXK48+sA0yVM0SvGzRNlzMEzUBdZ8to2kzKTpJk/nX8ZnfNqROuBPip4n5a7fvxWV0NLuXMdHBfwYplmzdPneenx8+QLZVRSmEZBiJSyQQpldzaO9f2cN+mAdqj9Zl5Ug8+08uTPQ/xjyM/YrQ4jkc8CFBSZcJWiCd6HpxxsFXT6mmqeABg1s3eZyMiiJhMFvbRGf5YI4pWlQ7uK9S58Um+8eZBJnI5WkNBooGZBycd1+Xg+QvsPzfCY9tu4Z6N/RjG0lgi3uqL82v9T3I2O8y53AigWBvspT/Ui8fQH12tsZRymMi/ise8uc1MPEYrk4U3aA8+jGEsfkNE/4WsQMcujPG1194h6PPSG597JadpGHREw5Rsh3848B5j6SxP7tqyZAK8x7DYGOlnY0RvEaAtLkflcFQejxG/qecb4kHhYLtTeI326k+oMz2gusKMTE7xV6/vJx4MEJultT4Tr2XSG4/yxslzvPzeqQaWUNOWB1eVERbWyBEEl+o7njWCDu4rSNlx+PZbh/BZFgHv/OdsG4bQHYvyoyMnGE7qDYy11a3S8l5YfkOFatp0SB3cV5Ajw5e4MDlFInTzW91ZpoHfsnjx4PE6lkzTlh9TglgSwnHzN/V8V5Uw8GAZzclnr4P7CqGU4ifHThOtQ+a9RCjAqbEJxqYydSiZpi1PIiaJwIcpuzduLl6LsjtB3H+3brlrCzNVKHIxlSHsX/gH6cp83rPjkws+l6YtZ1HfLqAyc2Y+lHJRyiHur5q8sWF0cF8hxqayGFC3PSp9lsXpsYm6nEvTliuPGaMlcB8FZ4Ra0qND5Sq66IwQ9+/Fay3+LJkrdHBfIbLFEm4dz+e1TMazN9fXqGkrSXvoEWK+3RSc4aoteKVcCs4wYe9mOsJzbmnRcHqeu6Zp2hxETLojT2EZMSbyPwHAY7RgGh+Mb7mqSMmZAFEkAnfTEfoYhjQ3y6gO7itEyOet62VYyXZoXcCsG01bSURMOsKPkQjcw1RxP8n8qxSc6YFWBab4aAs+RMy/66ZXtNabDu51UijZJDM5HNfFMk0S4QA+z+L9etujIVwq/X316Hcv2jbr2pfGh1TTlgqPGaM1eD8tgY/guFlcShh4MY0gIksrnC6t0iwz6XyRd89e4OfHzzOeziFUVqQh4LqK9liIvbesZevaTsKBxuaWiPp9dMXCZAolIgt8rSsDR/2tN7fsWtNWOhETy2zO/PVa6eB+E2zH5c1j5/jBgeM4riIe8tMdj1zTYlZKkSuWeWHfUb73zjEeuf0W9g72YRqNGcMWEe7btI6vv3lwwcE9mc2zvr1lSWWJ1DRtfnRwn6dMvsjfvXKAs6NJOmJhPLPsuSkihPxeQn4vJdvmH39+lKPnR3n6IzsJNmg7ty29nXTHoySz+ZtepWo7LgXb5rHtm+pcOk3TFpOeCjkPuWKZr730NsMTU/S0RGcN7NfzWha9LVHOjib565ffplCyG1I+j2nyqTu2UrRtcqX5JytyXcWF1BQPbtlIb2JpX3JqmjY3HdxrpJTihX1HuTiZoTMWnvegpYjQGQtz/vIkP9jfuLwtPfEon7lnF6lcnlS+UPPzSrbD8OQUd21YywO3rm9Y+TRNWxw6uNfo+PAY+0+N0Bm/+X5oEaEzHuGN4+c4falxqz83dbfzrx+4E59pMjyZolCevRXvuC6jUxnGszk+vvPWJZXLXdO0m6f73GuglOKHB08QDfgwFjjN0DQMwj4vLx08wTMP761TCW+0tjXO7zx8z/vb7CWzeRTMuM3erv4e7r1laW2zp2nawujgXoMLyTQXJ9J0JyJ1OV8s5OfMpSRjqQztscYFVJ/H4t5N67hnYz8XUlNcSmUYu7JBdsBHVyxCT3xpbpC9VEwUs0yVCrT4QkS9C8+4qWmLRQf3GgyNTVZauXVKylU5j2J4fKqhwf0KyzToa4nT16Lnrc/HG6Nn+O65gwiCJQa/sfEOBmMdzS6WptWkpj53EXlURI6JyAkR+b0ZHveJyDemH39TRAbqXdBmOjOaxF/n1aZey+Ls2M3lidYaL1nM8d1zB2nzh+kORgl6vPzNqX04qp7p2TStcaoGdxExga8AjwFbgE+LyJbrDnsGSCqlNgJ/AvxRvQvaTMlsHq9V7+BukszorItLVbpcxEDwGpXpriHLS8GxydvN2Q9T0+arlpb7XuCEUuqUUqoEfB148rpjngT+cvr2t4EHpV59GJrWBC2+IB7DJF2uTCe9XMjS5gsRtPT4hLY81BLce4HzV/08NH3fjMcopWwgBbTWo4BLQSzop2zPbyeWakq2QzykB+iWqrDHx28O3olSiuHcJDGvn88O3rng2VKatlgWdUBVRD4PfB5g7dq1i/nSCzLQmeDI+dG6nrNkO6xtT9T1nFp9DURa+Q87PkrRsfGbVt0G1DVtMdTSch8G+q76ec30fTMeI5W8lzFg/PoTKaWeVUrtUUrtaW9v3vZT87WmLY5SquZttqqpnEfR2xqry/m0xjFECFgeHdi1ZaeW4P4WMCgi60TECzwNPHfdMc8Bn52+/SngJVWvSLgE9LZE6YxX0unWQypXYG17go5YqC7n0zRNu17V4D7dh/5F4HvAUeCbSqnDIvJlEbmySeCfA60icgL4XeCG6ZLLmYjw4I6NpLJ53AV+Z7muIpMv8sD2Dbo1qGlaw9TU566UegF44br7vnTV7QLwVH2LtrTcuqaD7et6OHLuEl0LWKl6aTLNnsE+NnStmPFmTdOWIJ04rEYiwsfu2ExbNMhoKjPv5yulGE1l6GqJ8NHbN+lWu6ZpDaWD+zyE/F4+++AeOuJhhsdTlJ3apkeWbYeR6Rzwn/2VPQ3brEPTNO0KnVtmnqJBP//ykb28duQMLx08CUAi5MfnsW7YZq9QtpnM5BFDeHT3Ju7a1I9l6u9TTdMaTwf3m+AxTe7ftoGd63s4ePoCbx4/T3IyXQnuivc3yE6EAzxy+y1sH+gmGtQLlj
RNWzw6uC9APBTg3q3ruXfrerKFEslMDttVWKZBSziou180TWsaHdzr5Mpm2JqmaUuB7gDWNE1bgXRw1zRNW4F0cNc0TVuBdHDXNE1bgXRw1zRNW4F0cNc0TVuBpFmZeUVkDDi7wNO0AZfrUJzlYjXVdzXVFVZXfVdTXaH+9e1XSlXdEKNpwb0eRGSfUmpPs8uxWFZTfVdTXWF11Xc11RWaV1/dLaNpmrYC6eCuaZq2Ai334P5sswuwyFZTfVdTXWF11Xc11RWaVN9l3eeuaZqmzWy5t9w1TdO0GSyL4C4ij4rIMRE5ISI3bL4tIj4R+cb042+KyMDil7I+aqjr74rIERE5KCI/EpH+ZpSzXqrV96rjPikiSkSW7SyLWuoqIv9s+v09LCJ/u9hlrKcaPstrReRlEdk//Xl+vBnlrAcR+QsRGRWRQ7M8LiLyP6Z/FwdF5PaGF0optaT/AyZwElgPeIFfAFuuO+a3gf81fftp4BvNLncD6/oAEJy+/VvLta611nf6uAjwCvAGsKfZ5W7gezsI7AcS0z93NLvcDa7vs8BvTd/eApxpdrkXUN97gduBQ7M8/jjwIiDAXcCbjS7Tcmi57wVOKKVOKaVKwNeBJ6875kngL6dvfxt4UJbnDtRV66qUelkplZv+8Q1gzSKXsZ5qeW8B/hD4I6CwmIWrs1rq+q+AryilkgBKqdFFLmM91VJfBUSnb8eAkUUsX10ppV4BJuY45Enga6riDSAuIt2NLNNyCO69wPmrfh6avm/GY5RSNpACWheldPVVS12v9gyV1sByVbW+05evfUqp5xezYA1Qy3t7C3CLiLwmIm+IyKOLVrr6q6W+fwD8uogMAS8Av7M4RWuK+f5tL5jeiWmZEpFfB/YA9zW7LI0iIgbwx8DnmlyUxWJR6Zq5n8oV2Ssisk0pNdnUUjXOp4GvKqX+q4jcDfyViGxVSrnNLthKsBxa7sNA31U/r5m+b8ZjRMSicok3viilq69a6oqIRT9VcQAAAX9JREFUPAT8PvCEUqq4SGVrhGr1jQBbgR+LyBkqfZXPLdNB1Vre2yHgOaVUWSl1GjhOJdgvR7XU9xngmwBKqZ8Bfip5WFaimv6262k5BPe3gEERWSciXioDps9dd8xzwGenb38KeElNj2IsM1XrKiK7gD+jEtiXc58sVKmvUiqllGpTSg0opQaojDE8oZTa15ziLkgtn+PvUmm1IyJtVLppTi1mIeuolvqeAx4EEJHNVIL72KKWcvE8B/zG9KyZu4CUUupCQ1+x2aPMNY5EP06lFXMS+P3p+75M5Q8dKh+KbwEngJ8D65td5gbW9YfAJeDA9P/nml3mRtb3umN/zDKdLVPjeytUuqGOAO8CTze7zA2u7xbgNSozaQ4AjzS7zAuo698BF4AylSuwZ4AvAF+46r39yvTv4t3F+BzrFaqapmkr0HLoltE0TdPmSQd3TdO0FUgHd03TtBVIB3dN07QVSAd3TdO0FUgHd03TtBVIB3dN07QVSAd3TdO0Fej/AyxQ3Xk54htLAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "[]" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYYAAAD8CAYAAABzTgP2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAGANJREFUeJzt3X+Q3HV9x/HnK0mBnkwlITcR82MvSvxBtQXZopUZRyFAtB1CW8Tg0UaLc2Mran/YGnoz0kFvBttOoe1YxysiUTIEm+qQWizys/5RoFwkAwEKScNuSAxwEqC1V4Ph3v1jvxd3j93cj+/+/H5fj5md2+/n+/nevhdy+97v56ciAjMzsykLOh2AmZl1FycGMzOr4cRgZmY1nBjMzKyGE4OZmdVwYjAzsxpODGZmVsOJwczMajgxmJlZjUWdDmA+li5dGgMDA50Ow8ysp+zYseNHEdE/U72eTAwDAwOMjY11Ogwzs54iqTybem5KMjOzGk4MZmZWw4nBzMxqODGYmVmNpiQGSTdIek7SrgbnJelvJe2R9LCkd1Sd2yhpd/LY2Ix4zMxs/pp1x3AjsO4Y598PrEkeQ8CXASQtAa4C3gmcBVwlaXGTYjKzDNmyZQsDAwMsWLCAgYEBtmzZ0umQMqspiSEivg8cOkaV9cDXo+J+4CRJpwAXAHdExKGIeAG4g2MnGKviPxTLiy1btjA0NES5XCYiKJfLDA0N+d98i7Srj2E58HTV8f6krFF5bs32w95/KJYnw8PDTExM1JRNTEwwPDzcoYiyrWc6nyUNSRqTNDY+Pt7pcFpiLh/2/kOxrJjNl6F9+/bVvbZRuaXTrsRwAFhZdbwiKWtU/ioRMRoRxYgo9vfPOKO7J83lw95/KJYFs/0ytGrVqrrXNyq3dNqVGLYDv5OMTnoX8FJEHARuB86XtDjpdD4/KculuXzY+w/FsmC2X4ZGRkbo6+urKevr62NkZKTlMeZRs4ar3gzcB7xZ0n5Jl0v6uKSPJ1VuA/YCe4B/AH4fICIOAZ8HHkweVydluTSXD3v/oVgWzPbL0ODgIKOjoxQKBSRRKBQYHR1lcHCwHWHmT0T03OPMM8+MLLrpppuir68vgKOPvr6+uOmmmxrWLxQKISkKhULDembdqlAo1Px7n3oUCoVOh5ZJwFjM4jO2Zzqf82Cu34oGBwcplUpMTk5SKpX87cl6ju98u5MTQ5fxh73lSauaiDzHJx1V7i56S7FYDO/HYGb1TI10qu7U7uvrc58EIGlHRBRnquc7BjPLFM/xSc+JIQd8W2154jk+6TkxZJyXzrC88Ryf9JwY2qRT39p9W21545FO6TkxtEEnv7X7ttryxpPh0vOopDYYGBigXC6/qrxQKFAqlTL72mbWXTwqqYt08lu7b6vNbK6cGNqgk51hvq22TvBIuN7mpqQ28IQbyxP/e+9ebkrqIv7WbnnikXC9z3cMZtZUCxYsoN7niiQmJyc7EJFN8R2DmXWEJ5j1PicGM2sqj4Trfc3awW2dpCck7ZG0qc75ayXtTB5PSnqx6twrVee2NyMeM+sc96n1vtR9DJIWAk8C5wH7qWzReWlEPNag/ieBMyLid5PjH0fEiXN5TfcxmJnNXTv7GM4C9kTE3oh4GdgKrD9G/UuBm5vwumZm1gLNSAzLgaerjvcnZa8iqQCsBu6uKj5B0pik+yVd1IR4zMwshXZ3Pm8AtkXEK1VlheTW5sPAdZLeWO9CSUNJAhkbHx9vR6xmlnGeoV1fMxLDAWBl1fGKpKyeDUxrRoqIA8nPvcC9wBn1LoyI0YgoRkSxv78/bcxmlnPeq6SxZiSGB4E1klZLOo7Kh/+rRhdJeguwGLivqmyxpOOT50uBs4G6ndZmZs3kGdqNLUr7CyLiiKQrgNuBhcANEfGopKuBsYiYShIbgK1ROwzqrcBXJE1SSVLXNBrNZGbWTN6rpLHUiQEgIm4DbptW9rlpx39e57p/B97ejBjMzOZi1apVdfcq8Qxtz3y2adwZZ3nhGdqNOTHYUe6MszzxDO3GvLqqHeVtQM2yzaurtkHWml3cGWdm4MQwb1lsdvFyyWYGTgzzlsUx0O6MMzNwYpi3LDa7uDPOzMCdz/Pmjloz6zXufG4xN7uYWVY5McyTm10sj7I2Es/qc1OSmc3K1Ei86kEXfX19/kLUQ9yUZGZNlcWReFafE4OZzUoWR+JZfU4MZjYrngCZH04MZjYrHomXH04MZjYrHomXH01JDJLWSXpC0h5Jm+qc/4ikcUk7k8fHqs5tlLQ7eWxsRjxm1hqDg4OUSiUmJycplUpOChmVegc3SQuBLwHnAfuBByVtr7NF5y0RccW0a5cAVwFFIIAdybUvpI3LzMzmpxl3DGcBeyJib0S8DGwF1s/y2guAOyLiUJIM7gDWNSEmMzObp2YkhuXA01XH+5Oy6X5L0sOStklaOcdrzcysTdrV+fzPwEBE/BKVu4LNc/0FkoYkjUkaGx8fb3qAZmZW0YzEcABYWXW8Iik7KiKej4jDyeH1wJmzvbbqd4xGRDEiiv39/U0I28zM6mlGYngQWCNptaTjgA3A9uoKkk6pOrwQeDx5fjtwvqTFkhYD5ydlZmZdJU8LCKYelRQRRyRdQeUDfSFwQ0Q8KulqYCwitgOfknQhcAQ4BHwkufaQpM9TSS4AV0fEobQxmZk10/QFBKe28gUyOWTXq6uamc0gKxtzeXVVM7MmydsCgk4MZmYzyNsCgk4MZmYzyNsCgk4MZmYzyNsCgk4MNi95GrpnBvlaQDD1cFXLn7wN3TPLG98x2Jx571+zbHNisDnL29A9s7xxYqjD7efHlrehe2Z548QwzVT7eblcJiKOtp87OfxM3obumeWNE8M0bj+fWd6G7pnljddKmmbBggXU+28iicnJyZa8pplZO3itpHly+7mZ5Z0TwzRuPzezvHNimMbt52aWd04MdeRp6ruZh2fbdE1JDJLWSXpC0h5Jm+qc/yNJj0l6WNJdkgpV516RtDN5bJ9+rZm1jodnWz2pRyVJWgg8CZwH7KeyTeelEfFYVZ33AQ9ExISk3wPeGxEfSs79OCJOnMtregc3s+bIys5kNjvtHJV0FrAnIvZGxMvAVmB9dYWIuCcipiYH3A+saMLrmllKXt7E6mlGYlgOPF11vD8pa+Ry4LtVxydIGpN0v6SLGl0kaSipNzY+Pp4uYjMDPDzb6mtr57Oky4Ai8JdVxYXk1ubDwHWS3ljv2ogYjYhiRBT7+/vbEK1Z9nl4ttXTjMRwAFhZdbwiK
ashaS0wDFwYEYenyiPiQPJzL3AvcEYTYjKzWfDwbKunGZ3Pi6h0Pp9LJSE8CHw4Ih6tqnMGsA1YFxG7q8oXAxMRcVjSUuA+YH11x3U97nw2M5u72XY+p97BLSKOSLoCuB1YCNwQEY9KuhoYi4jtVJqOTgT+URLAvoi4EHgr8BVJk1TuXq6ZKSmYmVlreRE9M7Oc8CJ6ZmY2L04MZmZWw4nBzMxqODGYmVkNJwYzM6vhxGBm1kRZWMY89TwGMzOrmFrGfGKismbo1DLmQE/NJvcdg5lZkwwPDx9NClMmJiYYHh7uUETz48RgZtYkWVnG3InBzKxJsrKMuRODmVmTZGUZcycGM7Mmycoy5k4M1nJZGL5nNluDg4OUSiUmJycplUo9lxTAw1WtxbIyfM8sT3zHYC2VleF7ZnnixGAtlZXhe2Z50pTEIGmdpCck7ZG0qc754yXdkpx/QNJA1bkrk/InJF3QjHjqcTt3Z2Rl+J5ZnqRODJIWAl8C3g+cBlwq6bRp1S4HXoiIU4FrgS8m154GbAB+EVgH/H3y+5pqqp27XC4TEUfbuZ0cWi8rw/fM8qQZdwxnAXsiYm9EvAxsBdZPq7Me2Jw83wacq8rmz+uBrRFxOCKeAvYkv6+p3M7dOVkZvmeWJ80YlbQceLrqeD/wzkZ1IuKIpJeAk5Py+6ddu7zei0gaAoZg7s0QbufurMHBQScCsx7SM53PETEaEcWIKPb398/pWrdzm5nNXjMSwwFgZdXxiqSsbh1Ji4DXAs/P8trU3M5tZjZ7zUgMDwJrJK2WdByVzuTt0+psBzYmzy8G7o6ISMo3JKOWVgNrgP9oQkw13M5tZjZ7qfsYkj6DK4DbgYXADRHxqKSrgbGI2A58FfiGpD3AISrJg6TeN4HHgCPAJyLilbQx1eN2bjOz2WlKH0NE3BYRb4qIN0bESFL2uSQpEBE/iYgPRsSpEXFWROytunYkue7NEfHdZsRjZp67Y/PntZLMMshrVFkaPTMqycxmz3N3LA0nBrMM8twdS8OJwSyDPHfH0nBiMMsgz92xNJwYzDLIc3csDVXmmfWWYrEYY2NjnQ7DzKynSNoREcWZ6vmOwczMajgxmJlZDScGMzOr4cRgZmY1nBjMzKyGE4OZmdVwYjAzsxpODGZmHdKtS6OnSgySlki6Q9Lu5OfiOnVOl3SfpEclPSzpQ1XnbpT0lKSdyeP0NPGYmfWKqaXRy+UyEXF0afRuSA6pZj5L+gvgUERcI2kTsDgiPjutzpuAiIjdkl4P7ADeGhEvSroR+E5EbJvL63rms5n1uoGBAcrl8qvKC4UCpVKpJa/ZrpnP64HNyfPNwEXTK0TEkxGxO3n+Q+A5oD/l65qZ9bRuXho9bWJYFhEHk+fPAMuOVVnSWcBxwH9VFY8kTUzXSjo+ZTxmZj2hm5dGnzExSLpT0q46j/XV9aLSJtWwXUrSKcA3gI9GxGRSfCXwFuBXgCXAZxtcjqQhSWOSxsbHx2d+Z2ZmXaybl0afcc/niFjb6JykZyWdEhEHkw/+5xrU+wXgX4DhiLi/6ndP3W0clvQ14DPHiGMUGIVKH8NMcZuZdbOpJdCHh4fZt28fq1atYmRkpCuWRk/blLQd2Jg83wjcOr2CpOOAbwNfn97JnCQTJIlK/8SulPFYD+vWoXtmrTI4OEipVGJycpJSqdQVSQHSJ4ZrgPMk7QbWJsdIKkq6PqlzCfAe4CN1hqVukfQI8AiwFPhCynisR3Xz0D2zvPFGPdYVOjF0zyxvvFGP9ZRuHrpnljdODNYVunnonlneODFYV+jmoXtmeePEYF1hcHCQ0dFRCoUCkigUCoyOjnbNKA2zPHHns5lZTrjz2czM5sWJwczMajgxmJlZDScGsx7iZUOsHWZcRM/MusPUsiETExMAR5cNATx6y5rKdwxmPWJ4ePhoUpgyMTHB8PBwhyKyrHJiMOsRXjbE2sWJwaxHeNkQaxcnBrMe4WVDrF2cGMx6hJcNsXbxkhhmZjnRliUxJC2RdIek3cnPxQ3qvVK1e9v2qvLVkh6QtEfSLck2oGZm1kFpm5I2AXdFxBrgruS4nv+LiNOTx4VV5V8Ero2IU4EXgMtTxmNmZimlTQzrgc3J883ARbO9UJKAc4Bt87nezMxaI21iWBYRB5PnzwDLGtQ7QdKYpPslTX34nwy8GBFHkuP9wPKU8ZiZWUozLokh6U7gdXVO1Uy3jIiQ1KgnuxARByS9Abhb0iPAS3MJVNIQMAQet21m1koz3jFExNqIeFudx63As5JOAUh+PtfgdxxIfu4F7gXOAJ4HTpI0lZxWAAeOEcdoRBQjotjf3z+Ht2hm1tvavXhi2qak7cDG5PlG4NbpFSQtlnR88nwpcDbwWFTGyd4DXHys683M8mxq8cRyuUxEHF08sZXJIdU8BkknA98EVgFl4JKIOCSpCHw8Ij4m6d3AV4BJKonouoj4anL9G4CtwBLgIeCyiDg80+t6HoOZ5cXAwADlcvlV5YVCgVKpNKffNdt5DJ7gZmbWxRYsWEC9z2lJTE5Ozul3ec9nM7MM6MTiiU4MZmZdrBOLJzoxWM/x9paWJ51YPNF9DNZTpm9vCZVvT15l1Gxm7mOwTPL2lmat58RgPcXbW5q1nhOD9RRvb2nWek4M1lO8vaVZ6zkxWE/x9pZmredRSWZmOeFRSWZmNi9ODGZdwJP2rJvMuFGPmbXW9El7U8sqA+47sY7wHYNZh3nSnnUbJwazDvOkPes2TgxmHeZJe9ZtUiUGSUsk3SFpd/JzcZ0675O0s+rxE0kXJedulPRU1bnT08Rj1os8ac+6Tdo7hk3AXRGxBrgrOa4REfdExOkRcTpwDjABfK+qyp9MnY+InSnjMes5nrRn3SbtqKT1wHuT55uBe4HPHqP+xcB3I2LiGHXMcmdwcNCJwLpG2juGZRFxMHn+DLBshvobgJunlY1IeljStZKOTxmPmZmlNOMdg6Q7gdfVOVUzli4iQlLD9TUknQK8Hbi9qvhKKgnlOGCUyt3G1Q2uHwKGwJ1yZmatNGNiiIi1jc5JelbSKRFxMPngf+4Yv+oS4NsR8dOq3z11t3FY0teAzxwjjlEqyYNisdh7CzyZmfWItE1J24GNyfONwK3HqHsp05qRkmSCJAEXAbtSxmNmZimlTQzXAOdJ2g2sTY6RVJR0/VQlSQPASuDfpl2/RdIjwCPAUuALKeMxM7OUUo1KiojngXPrlI8BH6s6LgHL69Q7J83rm5lZ83nms5mZ1XBisEzzctZmc+dlty2zvJy12fz4jsEyy8tZm82PE4NllpezNpsfJwbLLC9nbTY/TgyWWZ1eztod39arnBgsszq5nPVUx3e5XCYijnZ8OzlYL1BE7y07VCwWY2xsrNNhmDU0MDBAuVx+VXmhUKBUKrU/IDNA0o6IKM5Uz3cMZi3gjm/rZU4MZi3gjm/rZU4MZi3Q6Y5vszScGMxawPs4Wy9z57OZWU6489lsDjznwOxnvIie5Z4X2zOrleqOQdIHJT0qaVJSw9sTSeskPSFpj6RNVeWrJT2QlN8i6bg08ZjN
x1wX2/PdhWVd2qakXcBvAt9vVEHSQuBLwPuB04BLJZ2WnP4icG1EnAq8AFyeMh6zOZvLnAPPaLY8SJUYIuLxiHhihmpnAXsiYm9EvAxsBdZLEnAOsC2ptxm4KE08ZvMxlzkHXsrb8qAdnc/LgaerjvcnZScDL0bEkWnlZm01lzkHntFseTBjYpB0p6RddR7r2xFgVRxDksYkjY2Pj7fzpS3j5jLnwDOaLQ9mHJUUEWtTvsYBYGXV8Yqk7HngJEmLkruGqfJGcYwCo1CZx5AyJrMag4ODsxqBNDIyUjOCCTyj2bKnHU1JDwJrkhFIxwEbgO1RmVl3D3BxUm8jcGsb4jGbN89otjxINfNZ0m8Afwf0Ay8COyPiAkmvB66PiA8k9T4AXAcsBG6IiJGk/A1UOqOXAA8Bl0XE4Zle1zOfzczmbrYzn70khplZTnhJDDMzmxcnBjMzq+HEYGZmNZwYzMysRk92PksaB1690/rsLAV+1MRweoHfcz74PWdf2vdbiIj+mSr1ZGJIQ9LYbHrls8TvOR/8nrOvXe/XTUlmZlbDicHMzGrkMTGMdjqADvB7zge/5+xry/vNXR+DmZkdWx7vGMzM7BhylRga7T2dRZJWSrpH0mPJvtyf7nRM7SJpoaSHJH2n07G0g6STJG2T9J+SHpf0q52OqdUk/WHy73qXpJslndDpmJpN0g2SnpO0q6psiaQ7JO1Ofi5uxWvnJjHMsPd0Fh0B/jgiTgPeBXwi4++32qeBxzsdRBv9DfCvEfEW4JfJ+HuXtBz4FFCMiLdRWbV5Q2ejaokbgXXTyjYBd0XEGuCu5LjpcpMYaLD3dIdjapmIOBgRP0ie/w+VD4vMb50qaQXwa8D1nY6lHSS9FngP8FWAiHg5Il7sbFRtsQj4eUmLgD7ghx2Op+ki4vvAoWnF64HNyfPNwEWteO08JYZGe09nnqQB4Azggc5G0hbXAX8KTHY6kDZZDYwDX0uaz66X9JpOB9VKEXEA+CtgH3AQeCkivtfZqNpmWUQcTJ4/AyxrxYvkKTHkkqQTgX8C/iAi/rvT8bSSpF8HnouIHZ2OpY0WAe8AvhwRZwD/S4uaF7pF0q6+nkpSfD3wGkmXdTaq9kt2wWzJsNI8JYZGe09nlqSfo5IUtkTEtzodTxucDVwoqUSlqfAcSTd1NqSW2w/sj4ipu8FtVBJFlq0FnoqI8Yj4KfAt4N0djqldnpV0CkDy87lWvEieEkPdvac7HFPLSBKVdufHI+KvOx1PO0TElRGxIiIGqPz/vTsiMv1NMiKeAZ6W9Oak6FzgsQ6G1A77gHdJ6kv+nZ9Lxjvcq2wHNibPNwK3tuJFFrXil3ajiDgi6Qrgdn629/SjHQ6rlc4Gfht4RNLOpOzPIuK2DsZkrfFJYEvyhWcv8NEOx9NSEfGApG3AD6iMvnuIDM6AlnQz8F5gqaT9wFXANcA3JV1OZYXpS1ry2p75bGZm1fLUlGRmZrPgxGBmZjWcGMzMrIYTg5mZ1XBiMDOzGk4MZmZWw4nBzMxqODGYmVmN/weyH1vd8khynAAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "N = 50\n", + "x = np.random.rand(N)\n", + "y = np.random.rand(N)\n", + "colors = np.random.rand(N)\n", + "area = (30 * np.random.rand(N))**2 # 0 to 15 point radii\n", + "plt.scatter(x, y, s=area, c=colors, alpha=0.5)\n", + "plt.show()\n", + "\n", + "x = np.linspace(0, 10, 30)\n", + "y = np.sin(x)\n", + "plt.plot(x, y, 'o', color='black')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPgAAAD8CAYAAABaQGkdAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAACkBJREFUeJzt3d+rZQUZh/Hn23HKpiypLMwZGi9CkCCNYSCMIKWyEuuiC4WCIpirQikI665/IOoigpisIEvKEiIskzIqKHNmnCxnVGwwnOnHWBGakZP2dnH2wGQTZ53Za+29z8vzgYPnx2afdzM8rnX22We9qSok9fS8ZQ8gaToGLjVm4FJjBi41ZuBSYwYuNWbgUmMGLjVm4FJj50xxp6942Vrt2rltirv+Hw/fv30h30daJf/kKU7W09nodpMEvmvnNn55584p7vp/vP3Vly3k+0ir5J764aDbeYouNWbgUmMGLjVm4FJjBi41ZuBSYwYuNWbgUmODAk9ydZKHkjyS5Kaph5I0jg0DT7IGfA54B3ApcH2SS6ceTNL8hhzB9wCPVNXRqjoJ3Aq8e9qxJI1hSOAXAY+d9vGx2eckrbjRnmRLsjfJ/iT7H//Ls2PdraQ5DAn8OHD6n4btmH3uv1TVF6pqd1XtvuDla2PNJ2kOQwK/F3htkouTPB+4DvjOtGNJGsOGfw9eVc8k+TBwJ7AG3FxVD0w+maS5DbrgQ1XdAdwx8SySRuYr2aTGDFxqzMClxgxcaszApcYMXGrMwKXGDFxqbJLNJg/fv31hG0fu/P2hhXwfcIuKth6P4FJjBi41ZuBSYwYuNWbgUmMGLjVm4FJjBi41ZuBSY0M2m9yc5ESS3yxiIEnjGXIE/zJw9cRzSJrAhoFX1U+Avy5gFkkj82dwqbHR/posyV5gL8C5bB/rbiXNYbQj+Omri7bxgrHuVtIcPEWXGhvya7KvAz8HLklyLMmHph9L0hiG7Ca7fhGDSBqfp+hSYwYuNWbgUmMGLjVm4FJjBi41ZuBSYwYuNTbJ6qJFWuQ6oUWuSQJXJWl+HsGlxgxcaszApcYMXGrMwKXGDFxqzMClxgxcaszApcYMXGpsyEUXdya5O8nhJA8kuWERg0ma35DXoj8DfKyqDiY5DziQ5K6qOjzxbJLmNGQ32R+q6uDs/SeBI8BFUw8maX6b+muyJLuAy4F7zvA1VxdJK2bwk2xJXgx8C7ixqp547tddXSStnkGBJ9nGety3VNW3px1J0liGPIse4IvAkar69PQjSRrLkCP4FcD7gSuTHJq9vXPiuSSNYMhusp8BWcAskkbmK9mkxgxcaszApcYMXGrMwKXGDFxqzMClxgxcamzL7yZbpEXvClvkLjT3oPXkEVxqzMClxgxcaszApcYMXGrMwKXGDFxqzMClxgxcamzIRRfPTfLLJL+arS761CIGkzS/IS9VfRq4sqr+Prt88s+SfK+qfjHxbJLmNOSiiwX8ffbhttlbTTmUpHEMXXywluQQcAK4q6rOuLooyf4k+//F02PPKeksDAq8qp6tqsuAHcCeJK87w21cXSStmE09i15VfwPuBq6eZhxJYxryLPoFSc6fvf9C4K3Ag1MPJml+Q55FvxD4SpI11v+H8I2q+u60Y0kaw5Bn0e9nfSe4pC3GV7JJjRm41JiBS40ZuNSYgUuNGbjUmIFLjRm41Jiri1bYItcJuSapJ4/gUmMGLjVm4FJjBi41ZuBSYwYuNWbgUmMGLjVm4FJjgwOfXRv9viRej03aIjZzBL8BODLVIJLGN3SzyQ7gXcC+aceRNKahR/DPAB8H/j3hLJJGNmTxwTXAiao6sMHt3E0mrZghR/ArgGuTPArcClyZ5KvPvZG7yaTVs2HgVfWJqtpRVbuA64AfVdX7Jp9M0tz8PbjU2Kau6FJVPwZ+PMkkkkbnEVxqzMClxgxcaszApcYMXGrMwKXGDFxqzMClxlxdJMA1SV15BJcaM3CpMQOXGjNwqTEDlxozcKkxA5caM3CpMQOXGhv0SrbZFVWfBJ4Fnqmq3VMOJWkcm3mp6luq6s+TTSJpdJ6iS40NDbyAHyQ5kGTvlANJGs/QU/Q3VdXxJK8E7kryYFX95PQbzMLfC3Au20ceU9LZGHQEr6rjs/+eAG4H9pzhNq4uklbMkOWDL0py3qn3gbcBv5l6MEnzG3KK/irg9iSnbv+1qvr+pFNJGsWGgVfVUeD1C5hF0sj8NZnUmIFLjRm41JiBS40ZuNSYgUuNGbjUmIFLjbm6SAvXdU0SrN6qJI/gUmMGLjVm4FJjBi41ZuBSYwYuNWbgUmMGLjVm4FJjgwJPcn6S25I8mORIkjdOPZik+Q19qepnge9X1XuTPB+88Lm0FWwYeJKXAm8GPgBQVSeBk9OOJWkMQ07RLwYeB76U5L4k+2bXR5e04oYEfg7wBuDzVXU58BRw03NvlGRvkv1J9v+Lp0ceU9LZGBL4MeBYVd0z+/g21oP/L64uklbPhoFX1R+Bx5JcMvvUVcDhSaeSNIqhz6J/BLhl9gz6UeCD040kaSyDAq+qQ8DuiWeRNDJfySY1ZuBSYwYuNWbgUmMGLjVm4FJjBi41ZuBSYwYuNeZuMrW26F1hi9qFtuft/xh0O4/gUmMGLjVm4FJjBi41ZuBSYwYuNWbgUmMGLjVm4FJjGwae5JIkh057eyLJjYsYTtJ8NnypalU9BFwGkGQNOA7cPvFckkaw2VP0q4DfVtXvphhG0rg2+8cm1wFfP9MXkuwF9gKc6/JRaSUMPoLPlh5cC3zzTF93dZG0ejZziv4O4GBV/WmqYSSNazOBX8//OT2XtJoG
BT7bB/5W4NvTjiNpTEN3kz0FvHziWSSNzFeySY0ZuNSYgUuNGbjUmIFLjRm41JiBS40ZuNRYqmr8O00eBzb7J6WvAP48+jCroetj83Etz2uq6oKNbjRJ4Gcjyf6q2r3sOabQ9bH5uFafp+hSYwYuNbZKgX9h2QNMqOtj83GtuJX5GVzS+FbpCC5pZCsReJKrkzyU5JEkNy17njEk2Znk7iSHkzyQ5IZlzzSmJGtJ7kvy3WXPMqYk5ye5LcmDSY4keeOyZ5rH0k/RZ9daf5j1K8YcA+4Frq+qw0sdbE5JLgQurKqDSc4DDgDv2eqP65QkHwV2Ay+pqmuWPc9YknwF+GlV7ZtdaHR7Vf1t2XOdrVU4gu8BHqmqo1V1ErgVePeSZ5pbVf2hqg7O3n8SOAJctNypxpFkB/AuYN+yZxlTkpcCbwa+CFBVJ7dy3LAagV8EPHbax8doEsIpSXYBlwP3LHeS0XwG+Djw72UPMrKLgceBL81+/Ng3ux7hlrUKgbeW5MXAt4Abq+qJZc8zryTXACeq6sCyZ5nAOcAbgM9X1eXAU8CWfk5oFQI/Duw87eMds89teUm2sR73LVXV5Yq0VwDXJnmU9R+nrkzy1eWONJpjwLGqOnWmdRvrwW9ZqxD4vcBrk1w8e1LjOuA7S55pbknC+s9yR6rq08ueZyxV9Ymq2lFVu1j/t/pRVb1vyWONoqr+CDyW5JLZp64CtvSTopvdTTa6qnomyYeBO4E14OaqemDJY43hCuD9wK+THJp97pNVdccSZ9LGPgLcMjvYHAU+uOR55rL0X5NJms4qnKJLmoiBS40ZuNSYgUuNGbjUmIFLjRm41JiBS439B+u8ezPSTLfpAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "m = np.eye(8, 8, dtype=np.uint8)\n", + "plt.imshow(m)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "PyCharm (trains-internal)", + "language": "python", + "name": "pycharm-40126efe" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + }, + "pycharm": { + "stem_cell": { + "cell_type": "raw", + "metadata": { + "collapsed": false + }, + "source": [] + } + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/examples/keras_tensorboard.py b/examples/keras_tensorboard.py new file mode 100644 index 00000000..32b89b63 --- /dev/null +++ b/examples/keras_tensorboard.py @@ -0,0 +1,113 @@ +# TRAINS - Keras with Tensorboard example code, automatic logging model and Tensorboard outputs +# +# Train a simple deep NN on the MNIST dataset. +# Gets to 98.40% test accuracy after 20 epochs +# (there is *a lot* of margin for parameter tuning). +# 2 seconds per epoch on a K520 GPU. +from __future__ import print_function + +import numpy as np +import tensorflow + +from keras.callbacks import TensorBoard, ModelCheckpoint +from keras.datasets import mnist +from keras.models import Sequential, Model +from keras.layers.core import Dense, Dropout, Activation +from keras.optimizers import SGD, Adam, RMSprop +from keras.utils import np_utils +from keras.models import load_model, save_model, model_from_json + +from trains import Task + + +class TensorBoardImage(TensorBoard): + @staticmethod + def make_image(tensor): + import tensorflow as tf + from PIL import Image + tensor = np.stack((tensor, tensor, tensor), axis=2) + height, width, channels = tensor.shape + image = Image.fromarray(tensor) + import io + output = io.BytesIO() + image.save(output, format='PNG') + image_string = output.getvalue() + output.close() + return tf.Summary.Image(height=height, + width=width, + colorspace=channels, + encoded_image_string=image_string) + + def on_epoch_end(self, epoch, logs={}): + super().on_epoch_end(epoch, logs) + import tensorflow as tf + images = self.validation_data[0] # 0 - data; 1 - labels + img = (255 * images[0].reshape(28, 28)).astype('uint8') + + image = self.make_image(img) + summary = tf.Summary(value=[tf.Summary.Value(tag='image', image=image)]) + self.writer.add_summary(summary, epoch) + + +batch_size = 128 +nb_classes = 10 +nb_epoch = 6 + +# the data, shuffled and split between train and test sets +(X_train, y_train), (X_test, y_test) = mnist.load_data() + +X_train = X_train.reshape(60000, 784) +X_test = X_test.reshape(10000, 784) +X_train = X_train.astype('float32') +X_test = X_test.astype('float32') +X_train /= 255. +X_test /= 255. 
+print(X_train.shape[0], 'train samples') +print(X_test.shape[0], 'test samples') + +# convert class vectors to binary class matrices +Y_train = np_utils.to_categorical(y_train, nb_classes) +Y_test = np_utils.to_categorical(y_test, nb_classes) + +model = Sequential() +model.add(Dense(512, input_shape=(784,))) +model.add(Activation('relu')) +# model.add(Dropout(0.2)) +model.add(Dense(512)) +model.add(Activation('relu')) +# model.add(Dropout(0.2)) +model.add(Dense(10)) +model.add(Activation('softmax')) + +model2 = Sequential() +model2.add(Dense(512, input_shape=(784,))) +model2.add(Activation('relu')) + +model.summary() + +model.compile(loss='categorical_crossentropy', + optimizer=RMSprop(), + metrics=['accuracy']) + +# Connecting TRAINS +task = Task.init(project_name='examples', task_name='Keras with TensorBoard example') +# setting model outputs +labels = dict(('digit_%d' % i, i) for i in range(10)) +task.set_model_label_enumeration(labels) + +board = TensorBoard(histogram_freq=1, log_dir='/tmp/histogram_example', write_images=False) +model_store = ModelCheckpoint(filepath='/tmp/histogram_example/weight.{epoch}.hdf5') + +# load previous model, if it is there +try: + model.load_weights('/tmp/histogram_example/weight.1.hdf5') +except: + pass + +history = model.fit(X_train, Y_train, + batch_size=batch_size, epochs=nb_epoch, + callbacks=[board, model_store], + verbose=1, validation_data=(X_test, Y_test)) +score = model.evaluate(X_test, Y_test, verbose=0) +print('Test score:', score[0]) +print('Test accuracy:', score[1]) diff --git a/examples/manual_model_config.py b/examples/manual_model_config.py new file mode 100644 index 00000000..7b3434a1 --- /dev/null +++ b/examples/manual_model_config.py @@ -0,0 +1,29 @@ +# TRAINS - Example of manual model configuration +# +import torch +from trains import Task + + +task = Task.init(project_name='examples', task_name='Manual model configuration') + +# create a model +model = torch.nn.Module + +# store dictionary of definition for a specific network design +model_config_dict = { + 'value': 13.37, + 'dict': {'sub_value': 'string'}, + 'list_of_ints': [1, 2, 3, 4], +} +task.set_model_config(config_dict=model_config_dict) + +# or read form a config file (this will override the previous configuration dictionary) +# task.set_model_config(config_text='this is just a blob\nof text from a configuration file') + +# store the label enumeration the model is training for +task.set_model_label_enumeration({'background': 0, 'cat': 1, 'dog': 2}) +print('Any model stored from this point onwards, will contain both model_config and label_enumeration') + +# storing the model, it will have the task network configuration and label enumeration +torch.save(model, '/tmp/model') +print('Model saved') diff --git a/examples/manual_reporting.py b/examples/manual_reporting.py new file mode 100644 index 00000000..148af51e --- /dev/null +++ b/examples/manual_reporting.py @@ -0,0 +1,51 @@ +# TRAINS - Example of manual graphs and statistics reporting +# +import numpy as np +import logging +from trains import Task + + +task = Task.init(project_name='examples', task_name='Manual reporting') + +# example python logger +logging.getLogger().setLevel('DEBUG') +logging.debug('This is a debug message') +logging.info('This is an info message') +logging.warning('This is a warning message') +logging.error('This is an error message') +logging.critical('This is a critical message') + +# get TRAINS logger object for any metrics / reports +logger = task.get_logger() + +# log text 
+logger.console("hello") + +# report scalar values +logger.report_scalar("example_scalar", "series A", iteration=0, value=100) +logger.report_scalar("example_scalar", "series A", iteration=1, value=200) + +# report histogram +histogram = np.random.randint(10, size=10) +logger.report_vector("example_histogram", "random histogram", iteration=1, values=histogram) + +# report confusion matrix +confusion = np.random.randint(10, size=(10, 10)) +logger.report_matrix("example_confusion", "ignored", iteration=1, matrix=confusion) + +# report 2d scatter plot +scatter2d = np.hstack((np.atleast_2d(np.arange(0, 10)).T, np.random.randint(10, size=(10, 1)))) +logger.report_scatter2d("example_scatter", "series_xy", iteration=1, scatter=scatter2d) + +# report 3d scatter plot +scatter3d = np.random.randint(10, size=(10, 3)) +logger.report_scatter3d("example_scatter_3d", "series_xyz", iteration=1, scatter=scatter3d) + +# report image +m = np.eye(256, 256, dtype=np.uint8)*255 +logger.report_image_and_upload("fail cases", "image uint", iteration=1, matrix=m) +m = np.eye(256, 256, dtype=np.float) +logger.report_image_and_upload("fail cases", "image float", iteration=1, matrix=m) + +# flush reports (otherwise it will be flushed in the background, every couple of seconds) +logger.flush() diff --git a/examples/matplotlib_example.py b/examples/matplotlib_example.py new file mode 100644 index 00000000..f918bed7 --- /dev/null +++ b/examples/matplotlib_example.py @@ -0,0 +1,36 @@ +# TRAINS - Example of Matplotlib integration and reporting +# +import numpy as np +import matplotlib.pyplot as plt +from trains import Task + + +task = Task.init(project_name='examples', task_name='Matplotlib example') + +# create plot +N = 50 +x = np.random.rand(N) +y = np.random.rand(N) +colors = np.random.rand(N) +area = (30 * np.random.rand(N))**2 # 0 to 15 point radii +plt.scatter(x, y, s=area, c=colors, alpha=0.5) +plt.show() + +# create another plot - with a name +x = np.linspace(0, 10, 30) +y = np.sin(x) +plt.plot(x, y, 'o', color='black') +plt.show() + +# create image plot +m = np.eye(256, 256, dtype=np.uint8) +plt.imshow(m) +plt.show() + +# create image plot - with a name +m = np.eye(256, 256, dtype=np.uint8) +plt.imshow(m) +plt.title('Image Title') +plt.show() + +print('This is a Matplotlib example') diff --git a/examples/pytorch_matplotlib.py b/examples/pytorch_matplotlib.py new file mode 100644 index 00000000..691f73c6 --- /dev/null +++ b/examples/pytorch_matplotlib.py @@ -0,0 +1,479 @@ +# TRAINS - Example of Pytorch and matplotlib integration and reporting +# +""" +Neural Transfer Using PyTorch +============================= +**Author**: `Alexis Jacq `_ + +**Edited by**: `Winston Herring `_ +Introduction +------------ +This tutorial explains how to implement the `Neural-Style algorithm `__ +developed by Leon A. Gatys, Alexander S. Ecker and Matthias Bethge. +Neural-Style, or Neural-Transfer, allows you to take an image and +reproduce it with a new artistic style. The algorithm takes three images, +an input image, a content-image, and a style-image, and changes the input +to resemble the content of the content-image and the artistic style of the style-image. + +.. figure:: /_static/img/neural-style/neuralstyle.png + :alt: content1 +""" + +###################################################################### +# Underlying Principle +# -------------------- +# +# The principle is simple: we define two distances, one for the content +# (:math:`D_C`) and one for the style (:math:`D_S`). 
:math:`D_C` measures how different the content +# is between two images while :math:`D_S` measures how different the style is +# between two images. Then, we take a third image, the input, and +# transform it to minimize both its content-distance with the +# content-image and its style-distance with the style-image. Now we can +# import the necessary packages and begin the neural transfer. +# +# Importing Packages and Selecting a Device +# ----------------------------------------- +# Below is a list of the packages needed to implement the neural transfer. +# +# - ``torch``, ``torch.nn``, ``numpy`` (indispensable packages for +# neural networks with PyTorch) +# - ``torch.optim`` (efficient gradient descents) +# - ``PIL``, ``PIL.Image``, ``matplotlib.pyplot`` (load and display +# images) +# - ``torchvision.transforms`` (transform PIL images into tensors) +# - ``torchvision.models`` (train or load pre-trained models) +# - ``copy`` (to deep copy the models; system package) + +from __future__ import print_function +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim + +from PIL import Image +import matplotlib.pyplot as plt + +import torchvision.transforms as transforms +import torchvision.models as models + +import copy +from trains import Task + + +task = Task.init(project_name='examples', task_name='pytorch with matplotlib example', task_type=Task.TaskTypes.testing) + + +###################################################################### +# Next, we need to choose which device to run the network on and import the +# content and style images. Running the neural transfer algorithm on large +# images takes longer and will go much faster when running on a GPU. We can +# use ``torch.cuda.is_available()`` to detect if there is a GPU available. +# Next, we set the ``torch.device`` for use throughout the tutorial. Also the ``.to(device)`` +# method is used to move tensors or modules to a desired device. + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +###################################################################### +# Loading the Images +# ------------------ +# +# Now we will import the style and content images. The original PIL images have values between 0 and 255, but when +# transformed into torch tensors, their values are converted to be between +# 0 and 1. The images also need to be resized to have the same dimensions. +# An important detail to note is that neural networks from the +# torch library are trained with tensor values ranging from 0 to 1. If you +# try to feed the networks with 0 to 255 tensor images, then the activated +# feature maps will be unable to sense the intended content and style. +# However, pre-trained networks from the Caffe library are trained with 0 +# to 255 tensor images. +# +# +# .. Note:: +# Here are links to download the images required to run the tutorial: +# `picasso.jpg `__ and +# `dancing.jpg `__. +# Download these two images and add them to a directory +# with name ``images`` in your current working directory. 
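+#    (Note that the calls to ``image_loader`` further below read +#    ``./samples/picasso.jpg`` and ``./samples/dancing.jpg``, so for this +#    example place the two images under ``./samples/`` rather than under +#    ``images``.) 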
+ +# desired size of the output image +imsize = 512 if torch.cuda.is_available() else 128 # use small size if no gpu + +loader = transforms.Compose([ + transforms.Resize(imsize), # scale imported image + transforms.ToTensor()]) # transform it into a torch tensor + + +def image_loader(image_name): + image = Image.open(image_name) + # fake batch dimension required to fit network's input dimensions + image = loader(image).unsqueeze(0) + return image.to(device, torch.float) + + +style_img = image_loader("./samples/picasso.jpg") +content_img = image_loader("./samples/dancing.jpg") + +assert style_img.size() == content_img.size(), \ + "we need to import style and content images of the same size" + +###################################################################### +# Now, let's create a function that displays an image by reconverting a +# copy of it to PIL format and displaying the copy using +# ``plt.imshow``. We will try displaying the content and style images +# to ensure they were imported correctly. + +unloader = transforms.ToPILImage() # reconvert into PIL image + +plt.ion() + + +def imshow(tensor, title=None): + image = tensor.cpu().clone() # we clone the tensor to not do changes on it + image = image.squeeze(0) # remove the fake batch dimension + image = unloader(image) + plt.imshow(image) + if title is not None: + plt.title(title) + plt.pause(0.001) # pause a bit so that plots are updated + + +plt.figure() +imshow(style_img, title='Style Image') + +plt.figure() +imshow(content_img, title='Content Image') + + +###################################################################### +# Loss Functions +# -------------- +# Content Loss +# ~~~~~~~~~~~~ +# +# The content loss is a function that represents a weighted version of the +# content distance for an individual layer. The function takes the feature +# maps :math:`F_{XL}` of a layer :math:`L` in a network processing input :math:`X` and returns the +# weighted content distance :math:`w_{CL}.D_C^L(X,C)` between the image :math:`X` and the +# content image :math:`C`. The feature maps of the content image(:math:`F_{CL}`) must be +# known by the function in order to calculate the content distance. We +# implement this function as a torch module with a constructor that takes +# :math:`F_{CL}` as an input. The distance :math:`\|F_{XL} - F_{CL}\|^2` is the mean square error +# between the two sets of feature maps, and can be computed using ``nn.MSELoss``. +# +# We will add this content loss module directly after the convolution +# layer(s) that are being used to compute the content distance. This way +# each time the network is fed an input image the content losses will be +# computed at the desired layers and because of auto grad, all the +# gradients will be computed. Now, in order to make the content loss layer +# transparent we must define a ``forward`` method that computes the content +# loss and then returns the layer’s input. The computed loss is saved as a +# parameter of the module. +# + +class ContentLoss(nn.Module): + + def __init__(self, target, ): + super(ContentLoss, self).__init__() + # we 'detach' the target content from the tree used + # to dynamically compute the gradient: this is a stated value, + # not a variable. Otherwise the forward method of the criterion + # will throw an error. + self.target = target.detach() + + def forward(self, input): + self.loss = F.mse_loss(input, self.target) + return input + + +###################################################################### +# .. 
Note:: +# **Important detail**: although this module is named ``ContentLoss``, it +# is not a true PyTorch Loss function. If you want to define your content +# loss as a PyTorch Loss function, you have to create a PyTorch autograd function +# to recompute/implement the gradient manually in the ``backward`` +# method. + +###################################################################### +# Style Loss +# ~~~~~~~~~~ +# +# The style loss module is implemented similarly to the content loss +# module. It will act as a transparent layer in a +# network that computes the style loss of that layer. In order to +# calculate the style loss, we need to compute the gram matrix :math:`G_{XL}`. A gram +# matrix is the result of multiplying a given matrix by its transposed +# matrix. In this application the given matrix is a reshaped version of +# the feature maps :math:`F_{XL}` of a layer :math:`L`. :math:`F_{XL}` is reshaped to form :math:`\hat{F}_{XL}`, a :math:`K`\ x\ :math:`N` +# matrix, where :math:`K` is the number of feature maps at layer :math:`L` and :math:`N` is the +# length of any vectorized feature map :math:`F_{XL}^k`. For example, the first line +# of :math:`\hat{F}_{XL}` corresponds to the first vectorized feature map :math:`F_{XL}^1`. +# +# Finally, the gram matrix must be normalized by dividing each element by +# the total number of elements in the matrix. This normalization is to +# counteract the fact that :math:`\hat{F}_{XL}` matrices with a large :math:`N` dimension yield +# larger values in the Gram matrix. These larger values will cause the +# first layers (before pooling layers) to have a larger impact during the +# gradient descent. Style features tend to be in the deeper layers of the +# network so this normalization step is crucial. +# + +def gram_matrix(input): + a, b, c, d = input.size() # a=batch size(=1) + # b=number of feature maps + # (c,d)=dimensions of a f. map (N=c*d) + + features = input.view(a * b, c * d) # resise F_XL into \hat F_XL + + G = torch.mm(features, features.t()) # compute the gram product + + # we 'normalize' the values of the gram matrix + # by dividing by the number of element in each feature maps. + return G.div(a * b * c * d) + + +###################################################################### +# Now the style loss module looks almost exactly like the content loss +# module. The style distance is also computed using the mean square +# error between :math:`G_{XL}` and :math:`G_{SL}`. +# + +class StyleLoss(nn.Module): + + def __init__(self, target_feature): + super(StyleLoss, self).__init__() + self.target = gram_matrix(target_feature).detach() + + def forward(self, input): + G = gram_matrix(input) + self.loss = F.mse_loss(G, self.target) + return input + + +###################################################################### +# Importing the Model +# ------------------- +# +# Now we need to import a pre-trained neural network. We will use a 19 +# layer VGG network like the one used in the paper. +# +# PyTorch’s implementation of VGG is a module divided into two child +# ``Sequential`` modules: ``features`` (containing convolution and pooling layers), +# and ``classifier`` (containing fully connected layers). We will use the +# ``features`` module because we need the output of the individual +# convolution layers to measure content and style loss. Some layers have +# different behavior during training than evaluation, so we must set the +# network to evaluation mode using ``.eval()``. 
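+# (Layers such as ``nn.Dropout`` and ``nn.BatchNorm2d`` are examples of layers +# whose behavior differs between the two modes; calling ``.eval()`` on the +# network switches them to their inference behavior.) 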
+# + +cnn = models.vgg19(pretrained=True).features.to(device).eval() + +###################################################################### +# Additionally, VGG networks are trained on images with each channel +# normalized by mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225]. +# We will use them to normalize the image before sending it into the network. +# + +cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device) +cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device) + + +# create a module to normalize input image so we can easily put it in a +# nn.Sequential +class Normalization(nn.Module): + def __init__(self, mean, std): + super(Normalization, self).__init__() + # .view the mean and std to make them [C x 1 x 1] so that they can + # directly work with image Tensor of shape [B x C x H x W]. + # B is batch size. C is number of channels. H is height and W is width. + self.mean = torch.tensor(mean).view(-1, 1, 1) + self.std = torch.tensor(std).view(-1, 1, 1) + + def forward(self, img): + # normalize img + return (img - self.mean) / self.std + + +###################################################################### +# A ``Sequential`` module contains an ordered list of child modules. For +# instance, ``vgg19.features`` contains a sequence (Conv2d, ReLU, MaxPool2d, +# Conv2d, ReLU…) aligned in the right order of depth. We need to add our +# content loss and style loss layers immediately after the convolution +# layer they are detecting. To do this we must create a new ``Sequential`` +# module that has content loss and style loss modules correctly inserted. +# + +# desired depth layers to compute style/content losses : +content_layers_default = ['conv_4'] +style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5'] + + +def get_style_model_and_losses(cnn, normalization_mean, normalization_std, + style_img, content_img, + content_layers=content_layers_default, + style_layers=style_layers_default): + cnn = copy.deepcopy(cnn) + + # normalization module + normalization = Normalization(normalization_mean, normalization_std).to(device) + + # just in order to have an iterable access to or list of content/syle + # losses + content_losses = [] + style_losses = [] + + # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential + # to put in modules that are supposed to be activated sequentially + model = nn.Sequential(normalization) + + i = 0 # increment every time we see a conv + for layer in cnn.children(): + if isinstance(layer, nn.Conv2d): + i += 1 + name = 'conv_{}'.format(i) + elif isinstance(layer, nn.ReLU): + name = 'relu_{}'.format(i) + # The in-place version doesn't play very nicely with the ContentLoss + # and StyleLoss we insert below. So we replace with out-of-place + # ones here. 
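+ # (a ReLU with ``inplace=True`` would modify the activations that the + # ContentLoss/StyleLoss modules placed just before it still need when + # gradients are computed) 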
+ layer = nn.ReLU(inplace=False) + elif isinstance(layer, nn.MaxPool2d): + name = 'pool_{}'.format(i) + elif isinstance(layer, nn.BatchNorm2d): + name = 'bn_{}'.format(i) + else: + raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__)) + + model.add_module(name, layer) + + if name in content_layers: + # add content loss: + target = model(content_img).detach() + content_loss = ContentLoss(target) + model.add_module("content_loss_{}".format(i), content_loss) + content_losses.append(content_loss) + + if name in style_layers: + # add style loss: + target_feature = model(style_img).detach() + style_loss = StyleLoss(target_feature) + model.add_module("style_loss_{}".format(i), style_loss) + style_losses.append(style_loss) + + # now we trim off the layers after the last content and style losses + for i in range(len(model) - 1, -1, -1): + if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss): + break + + model = model[:(i + 1)] + + return model, style_losses, content_losses + + +###################################################################### +# Next, we select the input image. You can use a copy of the content image +# or white noise. +# + +input_img = content_img.clone() +# if you want to use white noise instead uncomment the below line: +# input_img = torch.randn(content_img.data.size(), device=device) + +# add the original input image to the figure: +plt.figure() +imshow(input_img, title='Input Image') + + +###################################################################### +# Gradient Descent +# ---------------- +# +# As Leon Gatys, the author of the algorithm, suggested `here `__, we will use +# the L-BFGS algorithm to run our gradient descent. Unlike training a network, +# we want to train the input image in order to minimise the content/style +# losses. We will create a PyTorch L-BFGS optimizer ``optim.LBFGS`` and pass +# our image to it as the tensor to optimize. +# + +def get_input_optimizer(input_img): + # this line to show that input is a parameter that requires a gradient + optimizer = optim.LBFGS([input_img.requires_grad_()]) + return optimizer + + +###################################################################### +# Finally, we must define a function that performs the neural transfer. For +# each iteration of the networks, it is fed an updated input and computes +# new losses. We will run the ``backward`` methods of each loss module to +# dynamically compute their gradients. The optimizer requires a "closure" +# function, which reevaluates the module and returns the loss. +# +# We still have one final constraint to address. The network may try to +# optimize the input with values that exceed the 0 to 1 tensor range for +# the image. We can address this by correcting the input values to be +# between 0 and 1 each time the network is run. 
+# + +def run_style_transfer(cnn, normalization_mean, normalization_std, + content_img, style_img, input_img, num_steps=300, + style_weight=1000000, content_weight=1): + """Run the style transfer.""" + print('Building the style transfer model..') + model, style_losses, content_losses = get_style_model_and_losses(cnn, + normalization_mean, normalization_std, style_img, + content_img) + optimizer = get_input_optimizer(input_img) + + print('Optimizing..') + run = [0] + while run[0] <= num_steps: + + def closure(): + # correct the values of updated input image + input_img.data.clamp_(0, 1) + + optimizer.zero_grad() + model(input_img) + style_score = 0 + content_score = 0 + + for sl in style_losses: + style_score += sl.loss + for cl in content_losses: + content_score += cl.loss + + style_score *= style_weight + content_score *= content_weight + + loss = style_score + content_score + loss.backward() + + run[0] += 1 + if run[0] % 50 == 0: + print("run {}:".format(run)) + print('Style Loss : {:4f} Content Loss: {:4f}'.format( + style_score.item(), content_score.item())) + print() + + return style_score + content_score + + optimizer.step(closure) + + # a last correction... + input_img.data.clamp_(0, 1) + + return input_img + + +###################################################################### +# Finally, we can run the algorithm. +# + +output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std, + content_img, style_img, input_img) + +plt.figure() +imshow(output, title='Output Image') + +# sphinx_gallery_thumbnail_number = 4 +plt.ioff() +plt.show() diff --git a/examples/pytorch_mnist.py b/examples/pytorch_mnist.py new file mode 100644 index 00000000..81b537ee --- /dev/null +++ b/examples/pytorch_mnist.py @@ -0,0 +1,124 @@ +# TRAINS - Example of Pytorch mnist training integration +# +from __future__ import print_function +import argparse +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torchvision import datasets, transforms + +from trains import Task +task = Task.init(project_name='examples', task_name='pytorch mnist train') + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 20, 5, 1) + self.conv2 = nn.Conv2d(20, 50, 5, 1) + self.fc1 = nn.Linear(4 * 4 * 50, 500) + self.fc2 = nn.Linear(500, 10) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.conv2(x)) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4 * 4 * 50) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def train(args, model, device, train_loader, optimizer, epoch): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + data, target = data.to(device), target.to(device) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % args.log_interval == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. 
* batch_idx / len(train_loader), loss.item())) + + +def test(args, model, device, test_loader): + model.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + data, target = data.to(device), target.to(device) + output = model(data) + test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss + pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability + correct += pred.eq(target.view_as(pred)).sum().item() + + test_loss /= len(test_loader.dataset) + + print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( + test_loss, correct, len(test_loader.dataset), + 100. * correct / len(test_loader.dataset))) + + +def main(): + # Training settings + parser = argparse.ArgumentParser(description='PyTorch MNIST Example') + parser.add_argument('--batch-size', type=int, default=64, metavar='N', + help='input batch size for training (default: 64)') + parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', + help='input batch size for testing (default: 1000)') + parser.add_argument('--epochs', type=int, default=10, metavar='N', + help='number of epochs to train (default: 10)') + parser.add_argument('--lr', type=float, default=0.01, metavar='LR', + help='learning rate (default: 0.01)') + parser.add_argument('--momentum', type=float, default=0.5, metavar='M', + help='SGD momentum (default: 0.5)') + parser.add_argument('--no-cuda', action='store_true', default=False, + help='disables CUDA training') + parser.add_argument('--seed', type=int, default=1, metavar='S', + help='random seed (default: 1)') + parser.add_argument('--log-interval', type=int, default=10, metavar='N', + help='how many batches to wait before logging training status') + + parser.add_argument('--save-model', action='store_true', default=True, + help='For Saving the current Model') + args = parser.parse_args() + use_cuda = not args.no_cuda and torch.cuda.is_available() + + torch.manual_seed(args.seed) + + device = torch.device("cuda" if use_cuda else "cpu") + + kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {} + train_loader = torch.utils.data.DataLoader( + datasets.MNIST('../data', train=True, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=args.batch_size, shuffle=True, **kwargs) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST('../data', train=False, transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=args.test_batch_size, shuffle=True, **kwargs) + + model = Net().to(device) + optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum) + + for epoch in range(1, args.epochs + 1): + train(args, model, device, train_loader, optimizer, epoch) + test(args, model, device, test_loader) + + if (args.save_model): + torch.save(model.state_dict(), "/tmp/mnist_cnn.pt") + + +if __name__ == '__main__': + main() diff --git a/examples/pytorch_tensorboard.py b/examples/pytorch_tensorboard.py new file mode 100644 index 00000000..eebf560b --- /dev/null +++ b/examples/pytorch_tensorboard.py @@ -0,0 +1,126 @@ +# TRAINS - Example of pytorch with tensorboard>=v1.14 +# +from __future__ import print_function + +import argparse +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torchvision import datasets, transforms +from torch.autograd import Variable +from 
torch.utils.tensorboard import SummaryWriter + +from trains import Task +task = Task.init(project_name='examples', task_name='pytroch with tensorboard') + + +writer = SummaryWriter('runs') +writer.add_text('lstm', 'This is an lstm', 0) +# Training settings +parser = argparse.ArgumentParser(description='PyTorch MNIST Example') +parser.add_argument('--batch-size', type=int, default=64, metavar='N', + help='input batch size for training (default: 64)') +parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', + help='input batch size for testing (default: 1000)') +parser.add_argument('--epochs', type=int, default=2, metavar='N', + help='number of epochs to train (default: 10)') +parser.add_argument('--lr', type=float, default=0.01, metavar='LR', + help='learning rate (default: 0.01)') +parser.add_argument('--momentum', type=float, default=0.5, metavar='M', + help='SGD momentum (default: 0.5)') +parser.add_argument('--no-cuda', action='store_true', default=False, + help='disables CUDA training') +parser.add_argument('--seed', type=int, default=1, metavar='S', + help='random seed (default: 1)') +parser.add_argument('--log-interval', type=int, default=10, metavar='N', + help='how many batches to wait before logging training status') +args = parser.parse_args() +args.cuda = not args.no_cuda and torch.cuda.is_available() + +torch.manual_seed(args.seed) +if args.cuda: + torch.cuda.manual_seed(args.seed) + +kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {} +train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=True, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,))])), + batch_size=args.batch_size, shuffle=True, **kwargs) +test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=False, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,))])), + batch_size=args.batch_size, shuffle=True, **kwargs) + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 10, kernel_size=5) + self.conv2 = nn.Conv2d(10, 20, kernel_size=5) + self.conv2_drop = nn.Dropout2d() + self.fc1 = nn.Linear(320, 50) + self.fc2 = nn.Linear(50, 10) + + def forward(self, x): + x = F.relu(F.max_pool2d(self.conv1(x), 2)) + x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) + x = x.view(-1, 320) + x = F.relu(self.fc1(x)) + x = F.dropout(x, training=self.training) + x = self.fc2(x) + return F.log_softmax(x) + + +model = Net() +if args.cuda: + model.cuda() +optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum) + + +def train(epoch): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + if args.cuda: + data, target = data.cuda(), target.cuda() + data, target = Variable(data), Variable(target) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % args.log_interval == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. 
+            niter = epoch*len(train_loader)+batch_idx
+            writer.add_scalar('Train/Loss', loss.data.item(), niter)
+
+
+def test():
+    model.eval()
+    test_loss = 0
+    correct = 0
+    for niter, (data, target) in enumerate(test_loader):
+        if args.cuda:
+            data, target = data.cuda(), target.cuda()
+        data, target = Variable(data, volatile=True), Variable(target)
+        output = model(data)
+        test_loss += F.nll_loss(output, target, size_average=False).data.item()  # sum up batch loss
+        pred = output.data.max(1)[1]  # get the index of the max log-probability
+        pred = pred.eq(target.data).cpu().sum()
+        writer.add_scalar('Test/Loss', pred, niter)
+        correct += pred
+
+    test_loss /= len(test_loader.dataset)
+    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset),
+        100. * correct / len(test_loader.dataset)))
+
+
+for epoch in range(1, args.epochs + 1):
+    train(epoch)
+    torch.save(model, '/tmp/model{}'.format(epoch))
+test()
diff --git a/examples/pytorch_tensorboardX.py b/examples/pytorch_tensorboardX.py
new file mode 100644
index 00000000..859a8bd0
--- /dev/null
+++ b/examples/pytorch_tensorboardX.py
@@ -0,0 +1,126 @@
+# TRAINS - Example of pytorch with tensorboardX
+#
+from __future__ import print_function
+
+import argparse
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+from torchvision import datasets, transforms
+from torch.autograd import Variable
+from tensorboardX import SummaryWriter
+
+from trains import Task
+task = Task.init(project_name='examples', task_name='pytorch with tensorboardX')
+
+
+writer = SummaryWriter('runs')
+writer.add_text('lstm', 'This is an lstm', 0)
+# Training settings
+parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
+parser.add_argument('--batch-size', type=int, default=64, metavar='N',
+                    help='input batch size for training (default: 64)')
+parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
+                    help='input batch size for testing (default: 1000)')
+parser.add_argument('--epochs', type=int, default=2, metavar='N',
+                    help='number of epochs to train (default: 10)')
+parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
+                    help='learning rate (default: 0.01)')
+parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
+                    help='SGD momentum (default: 0.5)')
+parser.add_argument('--no-cuda', action='store_true', default=False,
+                    help='disables CUDA training')
+parser.add_argument('--seed', type=int, default=1, metavar='S',
+                    help='random seed (default: 1)')
+parser.add_argument('--log-interval', type=int, default=10, metavar='N',
+                    help='how many batches to wait before logging training status')
+args = parser.parse_args()
+args.cuda = not args.no_cuda and torch.cuda.is_available()
+
+torch.manual_seed(args.seed)
+if args.cuda:
+    torch.cuda.manual_seed(args.seed)
+
+kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}
+train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=True, download=True,
+                                                          transform=transforms.Compose([
+                                                              transforms.ToTensor(),
+                                                              transforms.Normalize((0.1307,), (0.3081,))])),
+                                           batch_size=args.batch_size, shuffle=True, **kwargs)
+test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=False,
+                                                         transform=transforms.Compose([
+                                                             transforms.ToTensor(),
+                                                             transforms.Normalize((0.1307,), (0.3081,))])),
+                                          batch_size=args.batch_size, shuffle=True, **kwargs)
+
+
+class Net(nn.Module):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
+        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
+        self.conv2_drop = nn.Dropout2d()
+        self.fc1 = nn.Linear(320, 50)
+        self.fc2 = nn.Linear(50, 10)
+
+    def forward(self, x):
+        x = F.relu(F.max_pool2d(self.conv1(x), 2))
+        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
+        x = x.view(-1, 320)
+        x = F.relu(self.fc1(x))
+        x = F.dropout(x, training=self.training)
+        x = self.fc2(x)
+        return F.log_softmax(x)
+
+
+model = Net()
+if args.cuda:
+    model.cuda()
+optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
+
+
+def train(epoch):
+    model.train()
+    for batch_idx, (data, target) in enumerate(train_loader):
+        if args.cuda:
+            data, target = data.cuda(), target.cuda()
+        data, target = Variable(data), Variable(target)
+        optimizer.zero_grad()
+        output = model(data)
+        loss = F.nll_loss(output, target)
+        loss.backward()
+        optimizer.step()
+        if batch_idx % args.log_interval == 0:
+            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+                epoch, batch_idx * len(data), len(train_loader.dataset),
+                100. * batch_idx / len(train_loader), loss.data.item()))
+            niter = epoch*len(train_loader)+batch_idx
+            writer.add_scalar('Train/Loss', loss.data.item(), niter)
+
+
+def test():
+    model.eval()
+    test_loss = 0
+    correct = 0
+    for niter, (data, target) in enumerate(test_loader):
+        if args.cuda:
+            data, target = data.cuda(), target.cuda()
+        data, target = Variable(data, volatile=True), Variable(target)
+        output = model(data)
+        test_loss += F.nll_loss(output, target, size_average=False).data.item()  # sum up batch loss
+        pred = output.data.max(1)[1]  # get the index of the max log-probability
+        pred = pred.eq(target.data).cpu().sum()
+        writer.add_scalar('Test/Loss', pred, niter)
+        correct += pred
+
+    test_loss /= len(test_loader.dataset)
+    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset),
+        100. * correct / len(test_loader.dataset)))
+
+
+for epoch in range(1, args.epochs + 1):
+    train(epoch)
+    torch.save(model, '/tmp/model{}'.format(epoch))
+test()
diff --git a/examples/samples/dancing.jpg b/examples/samples/dancing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4bb9da7e26a3702c7da783540c84c813ea743a10
Binary files /dev/null and b/examples/samples/dancing.jpg differ
z&Ts(rt`hH0Z9@C)SIjo&=B ztdWS>qXn;`gg-~(FE5%g0RbwJ; z9KHrWg=#wwGHZe}xMMio(*qQ)MZU<$3|oNY_xGT-h^L)x$WqNJjtBs5JqX9*%~`Tf zx@*HSCVZ(8K|dm~Mgfn}h|WJ+l6ldJoMUo=8wa@~{3;uTX&{`G-Q?qQ?c7uj-sJb7 zB89M;C3c?dO)QHU8*&t*9>?k16=LUHis569F|`1}B$9LLed`S#Nf~z*V7UsTqY!bQ zdYMQ1RtmCYkn7xZ{{R{dELnrhEHZZ%B=gp_p+X302H2!gn0*hmP_$DG(9RZ2WIYD> zm)z~w<~)7VK&{ZY?$M(ZBLsni^~FY@-GWeK__Oa>(%iq4<8UM#{V>#jXn+-bz=(;K z+;PYFd-SQY9oc#2PbOU8J~qdX!~9yTM&iMI?Z4{2VJkN~he3*|1SU{sc35MLHyz)@ z>7TCQ(>UI_#%rhWC&Qbch%!ZYaT_j_k^s0kYo;B5?x65bUutdGaXK%AdK@>>oqpyv zTRV0TSVpG_fsdK9o!xlPUiItKHO`x1Gen6bNe7)Xlff&D44#<>IjS*O9W`N&J7tOh zG37?MR8gER2jR_Nc((H5XmtG!(nBlX+!*B`k%*9FllYZi#<6cpmXli$=<9!D@b6O8 zS=H~g4BU@7Gq-k7VEb`_)2(===AhB)5|j#8&vN^a5aTQ`I`M^H`|7@k_=z-r9`FqQ zK~xZ;!T|*Eq+=h6ubM7`;@yy9B8<+*`{a@QfH~>XrjxqnG^(v@6y>$^VJCVi3N~%( z4N|taxV611lQb_I?~YaIG1HFqaosnJu;5^AImJ-8Ku|J1=03H%B_z(84;nkp&8g|} zT?ad4y0rNxX5;2L`A^*+F9Nzrtn|%KO8Z8IBV4O8mPSC12>Yr~6UXxQuQ9ZiRElg7 zv0pnqs{O97HI3wV5J=Hop$@V~pS(F4{_xLH-j&D7jSm>vTj?ohWUgko5iHWmoNeB! z$`8H>u4F_!q->q6`*Z77u5V}6E~L4-oX;f9xVae5pvSPl9>7$HA__8ihjZ-iagLvu zr(vksXjX#X&X5I=?`&WW{{UL_--Wg-1?|?OCeXHNBCD|e=mh8RWBFH}THATo3PY;x zK>L?G3}>43?H0}3Plrdgkzu*iW>U%yGaBw`2>R_+{7q6w#-r?A)IQPus&zZH2JY>5 z$K|d9aq_7AJJvR~R>9s#qRc3EByw|*IyH8WX0goAEQ>5_gjPKi?dU6+*6c0}?IR#C zCSxR?gVffrk>%7**Ex2(R=b5EK+H*L4`Ie@Y8@$V;4SurDi?5!XFtlB1a_&Vgtm`k zE1l9X`LT>uf3!;xC1ly=rq{vy$NE<8H#wm)XyH%`G0#NBRK0{-$~ZV*ew0GZ=9tdk zmjq9$nn+6~Q4Sedm<;>Y zlCYXgecm&Ns&?WiWhmG=7z9+8PGM&(!0~~B&1GY9zO$9FD(I{lRVcvx_{beUI?P)} zxgKa{U4&#hf-}dzy=dxeTTLuNl3h?9a-(-zWt<_c!nrTFgZ1b+`~_4>ZqXfY;Zr)t zAiZEuI9Rek{93eA!ja1}sMIcs5rrl-U+QXWej<@c#9>Ge(AK4^#|5{T=NLFZKl=4H zM8-_VJ_u>!-wpe_5ue^LAIVKqhrpVY?BmaaAQt}sTu;-rbnxH6w+hLD5j(Is=hNv` zns<4N#_WT3PCI*a=~+3Yv9ZrLg0)L?xqOYh1GZkLIH^Ce?xLB@$ndmdE#*qGWAh@q znC@gmK5h=@3;`n@I{hnsO9SPP zZlgZ+rDdUMw-JVt{$wMKtqPJC+nSE<)+e}CRXZHu_2WPO$*I;j*fN51jAtEd7gd`^ znnpyLT2A6b+*r0o?if{(qr|ML9hyV)C?~yWnN&jCLGu91Z5)cIXMO1GaAQ0&j^6bg z>}nBv6z7fzW@IVygNObqh`o`Y$yh#Y5xo6#pRG?kGP_FUh&bwTf@-bbnAafTV+?kd zJ^r+>q zc_k94AP&Q~(vEf>$s)Y>1jfhg=qBHgNW~|X(ki->8!HTBy-LT-n8d#_fyi!o`qcJu z{Gvh&072V|IoL)Brk59%$h#*fRA-Okt(z|s>M_7#Te*uB;O{=T;;X}G0`7FlZS6c=ffy z#IfaDcH{>gMSA|9Do_{!;d=M|lP1f=azfsR1pnP+z`tTHMiV;};&=bY9}lym}d9I8pcQB-VZ;S&0yNcFE-k)N~3Mf<1qS^RHIa zu46LKJDFos<+fF_0SD9QJ69|4{{T%}e;VqOB4KT!2`Q6;B)|kFGmgK?wROEj$d@*o z4aAQEvdDvIQnEywTl>TZWL}u+b52^!bl1CcoV9{GsC7w{xo2{Kw61Wg(?2zH-X?Xr zYr_(U&T)@Gn{p0w%PH?&9Kt)h*=+Rr1VN#aSzL@`11CA)VzV1c{?A9cxB*jOWlVGB zhIPsMm2bsJ>~XwKc_w+x!Q!@=WKxcZ6z@4FkU9M8H%kFzyWBZb*!B8XN2ns6DEtAm2XbQv9~ivy~i%aq!A2b|We z%nNQ~VYho_hVo8u8~*^Y6=|RzQZ!pfm>4S&$^0tJnHG^}_Ny0_K_M7!3J!6c`qTzx zH!p6B`?-|GA}?TmT;ta*pZE&*P)AFqd2v}tw&fu$8k6#M<36>_s_nUs*bS{Dzye6m z-p?oStfXxg)~8i!%Dx@8JAZs3Mcf83M_S8GmqQKmlem-91fTG&O*PnQ76piSc9^6c zgHkQJY|ibgE?8uC=a0~SVz_56Q_`gla|-??jS@5}S8ys%bMN%+^)-`s6DbJT^RRKA zGJUIGQDRQTA@+^?PX`r~acs;(4bhd(QNC zZ{l;#D_Y0F`lh#Ga=K=Pt7?8!Sd_yFJ5Yi$-MLi&V?L)8rBX2XnWP~pY>rAYKtS<4 zO|%u=+p*{CQa~PQ*_(+QEEx17j{g9Kd+&sQWxo`7zs$JRe#@a*INcfAXPc-bAYiAA z^<@Lln)N>m{>i=;)1bGO_+4sO4%jl>9IqV!jecKF-NkTJ$Iyn(=b;=8dG4%yc%Qpi zCYBkQ9g3^0Y(rzvl6{K=2 zTrI3?!+ID~r4sq?zTENuoptsa7C$_Ok8CjIEA&A@*sZ~!EFR`lx1tGR_rQHruhiFkLw$KscWV9~TZ$)f4yWxv%3 z+2^X>BHZ!v9lWr{NXX--y=u$h%{xU6 z7K^6Eq}W?K3wu}*S89>-IXESqiZ3Mbim|B(<(@*bEMr|d1{goQkQa~!9PJ)i<2cFb z#%ezqYVD(F&v?XC+3-%9bgu&1{iAbUY7-=~5%Qr@8<0KA zK2Ndrsi^G&X~y!g=U*IkKO@012;2*)?)DL!V}t#1UK6bALgMycv)jfIh+q#u+d}jH z*B?)6_CMMU745WDb#^N9srhh#k_JHTd9M{aQbjQW!NA?PX9oc1^{ptcaL%i0W_Z22 zf=Jv(iGoQ2JcEjb8gxbh1e?9ORoR%kHaC|n2;g82`R1v{BrNI}?i~QGlQcmr(#dhS zL>0pAVyo7eC6+yh1zZfAeQ{SHwPPULlEH!>n}SFu{{Uq)h9=xN9CMx9xXx)<(1sWq 
z;y9&QFi9ibcn7EBQrcVZw+>uJMYS0#3+E%e%QJ?gHB5YVBYS34w%!4+Zo$Y+~47&zzq#+Lr&q_DNRjL4F@?O!|Q zVn|Zkj~OI`%?Bi%-OXzqME#@4yr1_{0Hu^%Fhi1iPKuhE1IKe&rYgzAOgOfU|4T^wr-E)$Cs$$*9lx~b`*^T7N zLa>p>B|tEv7~A>(07F%N$@YjuWj2BQS^iZW#pWgRB3D8I#^asY$6uy6`qZ#owc@;B z5Xv$b4o!6XLxNNq*rOa$eWo~-L&|!L_w=bv%G}$^vOe=FlyBkMK_GMLF~wQ7(>%M0 z9LVZae)n+5{{VoWT8>*7V~#}+h5#&a51V<(BpmiR>qBXd7h9PxC)zI9$jc;~aU_6a z%sIy6=xWPwS)(NI#FjsI01wwcr583VSd|-2+&EFlIR5||Y}ysy1)+?yu!;WparbL3TgW4w z3Mw?61A3oBS;**tN_$0*E`IWYew=5TtrAEka^H1V13tAi)}SSne1H-NAf68xsD!D4 zSQU0S!TE(nj)FV(+hD<;7-R#Vz0PTDl*tn9{{G%QDVnwx-9Q0@xyQCfa4N=)G-gty z92^mW*C!OvJ&kmYj9XuTBRqT6kqMr_FjC9r0Nc63^ZpfDP0tX?IWgyj6|bS&G?2^` zH&L`P7$=YMHJpMbBM-8Ipn#{L>5g$rY8pA>QqE8VzZ~S`kLgn0+r0N4X48Vij4wFO zdRDYB#cLB6W4cJoHqniTb8%dfXG~&6aWrJekp_gS{_N)?9-S%)_O@JRFfcr@aYomY zHr@+|UM21-YcghEoFPzfTCz5=225yKg95}JzRYSQl2k?mA&+u;ahlML9%+bds$_xr z5&dccw*LTWMMlc){x4i&rob9)vc*2v+;^|{y42}zk8q$ms*(l=f2~C)7LiPh44-g- zIuB}Souy69wllO5fk>qrY)v(i&Q+CEEJT9GsruGT_6!x|LNX3FG3YqsABS4dh6nQH zRm&rSd8+ox%*sI95B(1T!ebxT=(yC5!#g%?m8&4b&?NY$OUlZ#tAaV|JDR$c#@IMNDQu_%mKn`r&u{jbN+#1EmZK}rBRqbUX6h%J;p7hACN>x2 z^r{w;<7A2#mD)m&uRpDHoaUCBYAxYnzt z!*;2#%8#o_B&>wKbJIBDv^?*2{LlDSMx!U$CnX|ZFpTrktBbku_x}L#`U2;)d?B%! zw0rGRSfqBkl&B}2-v0pae}Jw1Q2867MOe_ftCk(sW0lXR-CX*)#&9ufz&3H|UJTM> zF$HH)aOH`}ECCo9_oiP(DYduhj0`1}h$Q&UNzg#N@4(86dx! z+sXroq*UN!_8{jVjWAUz=!qzd~ z$1mC^Hn5^P$=wvi=SJkKq_PnWJv4Pz#bguTO4!*at8l3L&&-OrI2 zG@VVQ+JjF&O*cH6w*K5%}Ng4#hN%DKe6ayuMn`PQAqgOqLH>{s1|8NtXO z)}@Ngr$r=?z$xV`Z`kI03RV+z(3o8{m$k zsr*OyQQ_@V5nEmPtz&UIAaCA9!!SZf1cDCY56#VIip9V&djHCAYcdw1CCBO73lhIk6~kLHIzDSyfzaChib+bAdRGM7|ucG(+ASD zZ+t%g0B5j-Npn0ge9l9No@siG!ZHY4Hb^~2(cY@Qg=&o$nSXsGa;p{@2jwGZTwo~V zkU1c5eSTJTXD4SP=%I$C6?Badwy|ofZ#2{fDKghRO-v6 zLoK}5Owaa`*9uLtgS#a`$TRhIyN(pP@JTyPGBeXP=KeAGui+mISTu0zwpSMsPU{_wfHKU}4j-o& zBmE^ajbUpJf#J=;jieC9vrGewd6O!K;Ea%@etg#>qIg5>8iIU? 
z*YUphQbM8-IR#T_z#Mev>BW5=47&dPXYxD>*_FM@G5Mau;O$#i_<`foHSOrI)6(B; zHrAmYOH#o@NLcW59_8baxVSk}kD>f;7MJ1pH4Q%A)(9j-XtA&ut_dW9yqx1bd-~VU z{{RR)3v+*|+FwU~EN^-zj&r^=h)1-QjNwbOG?>B2+Hu&L`cL~l9Z$oMK`)$c5Cyl$ z8`E&k&rmx4RqFdIE?69Nq~)xSE4dLvaKO11WzYvax0keo`0h1w^G3GR2p)P_+rq6p zs2gBtm3M9jz88`@;6!p1o3pW^i%oYxQW9(z9s$9Hjl z-`aMOFPm{~J7;V#;oTP)<=bRg@8^3~DLdHKUkTt|C~NN*d|bECtwhE>J~)w!l$2?x(QLd(N+5mX`LmBOpcuN`JUPU&-p|Xt#Q`&CiVWNoJ2x)qm1o z3>gbP89*4|h07jK44>WS)2?M(olQK#CSxpP0Ps$7e;=6fTGV>j!klcmnc&~Fe2~Gd z+F78MIIXhHy}A;hf%V{;@bf#K20$Y`^V9%G>0X!d!&hB%#=4xli4l<*1+ohcbNLc0 z%h%+Yor&JXi9VV8xUTAI&^T&7X^oArvSfUYoVyI*)IjAIjitYXy-g!|tN}Z-%Mg2g zDI~Wf1l%RS&H=4JlHMW|WbGiaC-Gb#t~2@7cF1>D!h_Cz1!zwQw}@rA^9dvUomW2!7UYFUB%jGKr-*L(IJl1UR z2UlfJpBt0|$j>;g&%#R{n`wEhPOUYx2x5%`51LKGr@uM;4F+=8VxNqz(@aZEJ|$(f zwmYW6lbJDrRp8Yiy0>YflwCuW+MDpm_{IqBT>k)x?`L_|H*)^~^3qmPKxg!_HuabI;bYY`BVeLy7jz3EZ%HN=NWkZR{Td##++@B*;+>0s&{48hTeVpRM+|q)q^Y9 z+&aeNagIQ)57ZUg`c^&ca_KI+VE)gU{$u2GZb9q03FH0)iq`zcB8owEGTb1+kUnty z_xAq)8fDd-dV;Bq1agm*g*d_W`qg_|(6cNTHr`m7!lax2;XQ%wJ*xH9uh`$shCwWB zn}Z&Ek(2MmCUiB+vCZ7um2j>$xMHV}NI!*5Viyc$`CJ|W?meo!w-QNnCCddyb}Y)O z6z86wl}ik!MjmMa7v&3%f|0-36I>1UcotK@mvGzk#XC%w%OHmN*eJ$&VCJIM6{d{{ z94nk)_vA8RPty91?VB4Tb%p(k>)3^wC}I(;dUUZ~yZTMdqTll^FGi5lFi zl?8WUwvW46B1|^j$8l_sM^Wuul1EHrURgpHU^5!z?H-uTSy)^;ouN>ikPk|y33*F- zE><^F894UEZlQ+K1cD*^!ZPf`cjZ|c#hKS^wllDE^JJaB;Z$S_hBq66;~drPViPvm zF|-4Wb?4Lk`c+A6u5M*uKZQQ$PH19xTbG0aJ3lah6gGJDsNGUaVYvb*Yy#Y5V>NEt zCJu?^?|s5D8@J(8c_CY92%%&OQFtJ5yl33h8Yv9Zt(rx7BTQR zR;e&`4X6ZDM-mAMQVAsS`Bj+6wqUGwupk_;=xY)O)MSCM2@FpxoB`K^^`vMoUPVTA z8-N?I4o9^=W=FI(=9yMp7CGbY^H;Q8XKbw$LwOH_+*VGXrHiIimU770+!u^jOJ!>m zR^DB_X%Elajo*!2Y1qnhMy1}OF2rYP1C85zQ*CY7DzdOv9aQ$F+A6Cmvw)6q&sw~Y zEUb(ejIm-@p*h89(lb&iT+GorIuZ{?BB)$khLadC85tyYtvjoT1m`MzrQ;aojb(YU zD>3r+hB(2YAZ)T88Ew$~#01Zmj-OgfId(il9Caf-y=p)8CKq9VR}2B`QJZuse|l0z z=jrYAr65F>@<-&93JSN%IqOw!ZzWz@Ev^nR>5i1^Y`2uLFhY8QIvSHukN2__aPJy| z+3rckKD8xCtkR2v7aH!V4q<>Wsw8$bkx!1Mqq1L3*aoLsCVPD8F) zkEVFyy1x$mW3$wOg7P=FxegTqo){c-KZ)n3HJhOK>UAtaYR5;e+ern}zU2{yC>WvI zTO1G0xT$XimWYb+13ll$sLlcm;4$5fKpRKvTb?8F6W{6Y4XnmlE*uq`cPI+u8@jF! 
zddR(;_>WMvDz6Ax78oONc~GZuCpiG0n>oijdMGDqYD=NbIVZ8fc!JV>KI91yHLAZC z3^yZTl#n~;0QRgMEtz77<)wH^nRbjTHyJ-n=DJ@UYK^{2J9R8Kg zp|R4vW5E9av_8M!j|%A)b6Z*6_-*eRSGIDffyUqB zQ@iH$uOQMRnXcpkWhO>xOqdxU07(3at&58}XVXf;v%HSTpgOk_uuK7kX zk1UbrLCP}cO#1u6pS4$pd`+WU-)ok7pN8VPpLmeO*A}pxnDgc;;Z(d%GRN;J9TPR# z_@ClGhde)}S!)+MovHBjqibI!^fNOC3>6@PH)gqzoB|O1fStr~Uk~UHcc z@@0a@elT?@dt_nL!!dE8cH|2O(1)K8>*KB8@<1+c-8E&-?Wm+cO})t zp>ruAnRc9qXD1|_HUR9ToB&AbB=})-t)Mz|e`<|OD7CjgGv||t-z1NrCp?mC9!~<_ zX)+{NF#VcChI?m1k`f67sx~=YxxgofAaF>ppu^IkAB7)3h@EPm#ALcn;=!dg(?Jwd;#h=VB@hTt$g9}+*<2ePO%-# zvc)VX%av?FmE}m8cfzOp^G_r(sqMADyI)b=!Eg2@Gbk~%(nm9*gXTT}E}(tTLni~f zdsogn)|YuV#QCfzlw0X~QXNAjFl1P=ZH$=c!J27v)RZ2)IYo0_6FECr-qEz{&kXoK zM9|vRZW$R#ytGwS^B?bEl0HlY3UT)pbAm{wURlScYF7;%!rVlmWZ$%|)&oEH*T{LQ zpYV}fU&j^In@?k%$sBhdbiy{2!0ERH06w)Ir>gyz!}^w^GM$!?HN%3~+kpAUuLZjQ z0DigNvKoHj@?VS2{d-emAZ9mG7w5L@1N|$Px(hK@SIV$cmGwzxciv%0N#IuSE(O_^5wan5teWSU`kjFgIZA9yhIa-#_SRonzGti z7a2=u1TbyD6P|Hek!bhR%$T<%vl0Q|RSM>9?UD%6GICLTwo}ID>zw*$`PO{4A7_Qm z!dSYW_zzv*O#c8OT?OWqadRn-S@yO*R0DS;;Pdzo&a7YPQ7Iv$eStRo+(9V#|I~q@AatEI7tRRQ`g?DN{bsv$(^U5?@@Jr00jUAbK0coiezz0V~{#KB$)fl z+}BBOtQjpLX%HpG-TweDMj2sT@hU*rc^=(qY};$8Tn;gYJvk>IPHK^q;A(#I>SfCH zEC|gjz{?qRZ<_#P`>X3oVjluE1Lg;6`_=n9c1xJq5uSa)Q3&0<`Vr|vAvZ@}b^aUh-k`5%buWgBc!Zx2m) zCx+k3D6x{TPI5u)Q1&&-S<1{pPbs%5aX+1v&&~!h`PXNt&Gt<;=HRlUfwdf*6YML8 zXx``Mj31Ybm&<)}dYsZaDL2&E)Z){n)6B6DCSVl_1Ti_sOnXTA2+%7 zsqCT>Nbew$MuQ(CH{JQyo_l!Kg%DTC63o| z<{qdQ+Zn}E)ns`LV{)9i^qXg%6OA*qZ z)-4spl1DN(&5UE3eXYE$aG*Kzz{rz1&*fHbEaixK$K=aoayb>0iHS9=tY8tzjmS?V zbrcAiK@%2G;dRc9lc-xyNpmk$k5NWxrD8EUp*XK!19PJMRXUF7GH8X&p0&!jd2-cmILpO4r@gsOu^wue7MV9 z$%42f4u{^7^TO7S++}m#y3ZNK;k4yk<{>~pU$|V&Cc(aCy!xW zF=ld1=qA?gE^T&9eqTT^KPt~#hZhSd`Sy|WuNW0fyqm}^kU2ipDfwX|cQ^+=m2op@ z(y-KIvVNtMbU6Ff(CONgQCh0+W@l`U>OTtPqnH!1&J=Vc^r<1aduhNQ>ic7gW=!uA zaCVFliDOnb5 zPK@XhT%kQOYANB87!gQz0AQSazlo?nx9#MQFp4gEUQ+SZ!GxvwqnRBMy+?FjE zVGzuUIU_wQ8;zX&w|--vEVgER5W|3ds?mvoBg+(hid(i3LX7hRxmTh5s&vz@%8bJK z$E5>N6GYKw*EJ~8C&L^QG3X=4a%-fQ!{%bYVzx8Fgd|9QP#Y=>lkfaTsqRSmYoM_3 z`gnfAJ2?4}jNG{yB<{!I$7;^;FNjdX7N2DJgfj<~KJ=up;C(U8Y@ObQYAHtS#( zZ&eaQ12PaThzqrN0OLI>zKB*(0P8>$zp)m^{%oj zLu+v5)6Z?EC=LqayRhy4$gO=J!!gFP{kcWFQb?=zcv}h^`>cEYdVAKQTfr==ExbfV z$^szZvFKm(;~&tB`Bf_(+$nOzC;M@+spd_`$$|a=S*PXhTMK5n#NAXgFFY}#MH0nnqMx$ z-XK|7mj$pf#s)d|0;+5N4;qf4cPiZ6+gmypk*067qcVavg>Bd;oMRZk_s3;_JOs3s zxPl^zsTe=%!2We!eIn}Q2wvc_oVu$xR&t=6ZOA0x=N`;^(MnT`x@L7F7PdT#!2Q|@Xe--s>ZCKeICLKD$>Ee=TV>yB) zC;>=`ZQVg2wp$%pn=IWW_Je6V%cu&_B@u)CL|ek`P2-A zQ@b09aL0B+08Mt9J*ASyt95Vo+pri2wz5X zSE%<#zIbcI_ZqdRk5IQVLaMBa&G$iGqaK(TsJ=2>OQrlS*3-?(+}gi}=PF5+5DeVJI$QLs4R`j| zfh=PE@0e5`I|Tm#ALm}z@O!{=X)g|^b0^tz3H{l}0E`n^&U~uHLZ34@uZ?e~cn`w5 zSA}$_OmC>5mA0OdZakj+vHo@Ay3L7#acG3HVGNEAGl7B!y>)*Pd{n+P_>ZaT=*;#u zVJ$6=-U2Fx{EG)+{`GM;H*$-!l`I+elkff&lI~ozm64Y5G1_GL7r6AO_Oyk=Dvy+W zs=Ob@wC^Pg7s{cxb~QyLZV)L{>4Dy=#)?Hp5-iNSK+j?QDPl~q>}qv{#!JXf^=>o# zzO}pI{TJ=m5wL{GBMr#8;fF#`ws@){Oq)j2-bfi1+}vHtL!_vF;^S_A+4-73&b=$c z9u68*a$dL>I+H(^(+0xFo~onz(s*w`w$ZJ;=@f}uX!B1=P8;PO*#!Oqum1pS#S|V) zE1B4k8$asTzvEV7N2uy?+Ok6sX4D7HyIUwY9kI^@S2uBX?IX)8yoIs|Q;yj5t%i7F zRtluP;f7((%wrk#;8s?z1DqF*TiM=VG^NL3%tnz>}5B*2w&p-yq$s9q|tLccIpUBq|mpX4g- zlufu~jFO|Lz9<_vbm-xp@cFFAYM>kXd-SPqWDs6DnFEAxFD$voUzo;Cs1GFg7UhP5qa5yL8$#ANk8pZ$7{@$Dp+ zZMwNpmL#`&xh14?TA0H9>|^lhSxMV+;)>SO2cOQ~LvyeJ_f9xJop(aV{t_(#WVZ_{ zhghWAcRUhyU-uicet51TOJ%ktQylP5px}R6>O2`W&xUT}zllLFAC&OF3nFJ5NIk!m zb5y9F$6YAja=cn%=#ngVVmWRABmCdk<*y|n>i^Mt`*%;!0z~QZ!3@U-xisE%`x!3H^pA0P5 zJD8Blp^+44tx0hQmu|}OknC_a9tV2a)Ga@=u7pyjlO#;Rqa%zh70~%c+8E?@W4QNTqu#SF ztt7a0{u~T?cPIK*h{j13Z7l;T$e^940QNMPUpaQ33icJJHf-%4Ac3>MJGvUqg&4GF 
zZpF`1XahIITkfo_Lr7RfHr3C#$MhA=I(m%$HP?8W5pSY2fRC2oeBPDGGZh@;K7*}v zLFi{KMYmHVjmB}u)}G|VtgJFz_U%x^BSz;Z9OUALDzY;YNe4XE@iHF~+=>X=!>;~n zP43_TSMsR9%L9-}^{ck9e4B#|dQl=>hOXt3J9`yhXb$YQJY}! zsK!a?Nu_&p8!HpIso_sNQn@xnUuKJO+DO{D$)==YL?n-uI%B;xot36n7~Gf?fHYRa z1BR&PDJuxp&zRp}Uo@P7#}weQ;K>@iMSgZFc?;`ctX}Wyl0}7 zhFi#`)I<3T{<8|hKQB_i75wY%ZxeWBV7pl@b2M&`Bsf5!I0TpHf;OH8e&Meh8!1KE z9`*tXO6Qu(q%89Xj4F^o+E{Wkj(@_VyN)PV?D90A!gsHgBP%-|ry1j>+<$dcdskB0 zL(Gn21t*F;Kty}%AQ-dQ~8?lT-#fo>WWIu5yt6j9QQIV zRyBEC0l_LsDnBfJNUtu_%3WW)?wCQ8C60W?Jd$ya_1*ZxRXTTvov zoA6_d`B{*-2<3|~@6LZJ`tVF`B0A0X*r=jPr;h&+Yz!OOvh>9eQ)RHrTeR|bsZdId_6-El7 zRY^P?1{`BPm8)kGrJ)R=j^&V_P&wn@HO%!oO)WD=vbKYK@tIaKHx}uN&G8k`(RFKQ zibtB(;54BEMfsrlL#Z9~Vbub9ZkYyi+RqTh)LGAo+e)AcM5$ zmBAf-vCHXN)SAj^w@}S-f2&(W*6ScI9kD?94EbWx^&kPaIXTY&a5|o&b$>H8)3d}t z-I2><9e-cawmcsV_3izi*_c|z3nKZK1gai79A`X}*jKd}awExg9ObF&dQXBE#X9wc z--s?>Rtkc5(^zrS;unL zjEyW3e48U|@$Q9vdHYUjJ|4RHV7Hpo#^^@EL?OWp*c~Mv{{S%bIGZTXnS%H%dL$Y+k`Zqb zg}Ho#g&7{-m2lergzF(O#t03PyL$2u^si{pG)T1Amg?by&%C)L?dQ`yD}(V>w&8yz zxg>eNyTf%JzfLJer@eBg4{-90Rb{lg+Uv1){{RZ(oy%FbGJU0r#v@r>es&|5KVE{n z4RSMkqeCl3vdGK^{QTc8dwp_^Ck z#}4Ek1gAWH9DY^7HtJNd=%{-de9v>hQ5#OP2_DejE0RI^$pa&=W16ZY5p5*ofswkV zzL{jW62`t%Z0yLUt2AOs5NCGbh8~sQWO>)VtZTB!q`1f@3)-njj2b*=3_n`CbVDeM zf)wPM)6p#>xn@r<%(w#)pveQ=`%nqkgHQ16+LFZtV9RkJ%V(mG?^mvPXTxiuS{de& zAfzdglmc9@PoPo8(buh9@YjT`^ldb&;pUxBpK=&(IqFBhwOZDZbbBbKnW2FuAVV7h zN)e86=(+r88$~@r`ztTXtF+~PBT?V$*Yh=#r@+^_FsIH51xV{z`lh7v6^RIGA>_yu z;{@aJ{{ZXOjg|bClB1{*vkc^PHAE6Ugwn$o2h2dbPIzv=;au(hnzOX!XBG`3D3)jw z0mgBHKa2h2&39Mx3rLoDmwYF1_XBYw^fkwLlKR@%O@A)ps}m5o1ChdveQHF`Fz6Ro z+LX4ox|EVh3gwx;QJ+J%2lcBB4y&dyNm@CWu>gjDn;6ei+cjHEcd*qh#H%mbE+diV zJwATJt!(Qr_KO7p$e9?D-km?5IjgxlJ2M9FRlOO)`FR9kUpcJpOJ2MCL~hKIZ9ElF z2?vgU3WoR1j!^7b_T&zDs`pd*u$FDP$R8-hP0o%)Yoy=WP%$y|z}xH8e<4_pZzZhJ zMm?tDGaJhx-yI?Jr33%&Z54Pb<~^6y06No9{H^C7Q-9`Do?*( z@v9b1xI|kdmCr4XhcwMnXznCK8hJw-U01myBBQhfnDR(pFQMQ(4IX`&hi$ z6olE9#xuokLd=nQn9l$YrZf5U{OW~*Y!8^pWyrzxt(`X6rMXh&kVZqX13CFur{sSj zT-7!;rLN>Qb}KB!WhdkSpL~#irD|Q>Fj&bYmyQoOEF1Rm(n^b^*M%#zSr0wSYf$hUVmdc=Y2Q&trFMWdw3t zyl-s7%}G^P5-1tK$UKhXye4f9bd5xcLaLH5azNwr$RLX7ycgnFbR}&&R4m$k$;!+* zHnL~$^U+A_{Hq#}xw>ar<6jOYzihX#AZxv)nfeyV&r0%rN?2gK39&a6U;yqw<3ETM z?+{5EEYZsujiWo?_=X72%j&1M(!9IJo)Cau{{U)P3@a5$B4L7kPCHanx^|HjEzsd_ zCJcc;a+Mv<$Hat#+fH zLR~`~(y$J`YaE6>D>=`EuoK|!s1o?h%ty;F5%$OtRX&peJkkca=t%f|DVCJS(0x-RO0ip=E6TA6q!jG8N#q}kJhHcyDK%S zOm;U_8RDn6Ee<0D6d!cZ9(sm2!U!(@Boyt0ntX z0hx~F;PKM0TPbB|-{e9;;AXOP9YQ&-Jdi*HWCA+Y`35_gV+VH`9CQ^D(AFoY8pE9_6YnBe@Xa%(=> zWStAP=G}wxo`dCI<(hrX;dqb++Mr{R*X2=TJk0gv%=Pxx1?^hX2L zngUXis}jn?o;@o<)jYdIV549IB;zBOHIU9e)anTvN6Y>nO3=1_mf<2Lx5BKfM_-tp ze*ix-P_@`3SH4g59i%g}e7q6UB>H<+O^vaZQZhFNZ0EIXuoHPiji4_4_9u`2y;j-w zHj+nGUQX5LE&5j1Ib<=dL<{Dz8$)0m;2&@E>sXV`ETdtNg)Rrp#y=XheRXquA;?i| z>P&ItAQO^F_rinpKH{=t$!N~fA`g?ll5y66N1*&0R-58Zkt-i8+fLU?K_9|N^})t) zFgfqizOlRUH2RE%+GRGkY+7c=%MlCmsP=5*{s&(u{1VaMQSlV#e2QX#$jlhzlJCLi zo-x-o^cIeONs>{qCQ^;Rk&NW1{{VQdGuVOaUJg0)PebUiKI|@bqr>Lj&UoF@Mq#&j z9X@YL;IA}En&wD1ERp96C-9X35zqbg)6%r`KM`D7Tpc%4xMsSFJlR}H!Im;XVeUq1 z?ALqbm7`eVQ^Uq_giWL6QP6by{J!<%a_eK*b4^_F--x#F{j;R$Ok;IimfACb%mCv) znf!%R(X@-3zqG!?8Y1IiTmW&N^*qJ3-y2@s>>}LGV};M~s{Ejz=Uu*&v->?!m7_pG z{qYPlx4HfB-<3BD7(gTat?l&X8eV1y2llp+%%EH5Fq97xC5y!P5>3lDLFeGaf*t$ zHMX85^Av z$ARyGpQScQS%CsD#43_l3XX{-Jj-CGi6G0q~{!s0XG1!0g>l;LZYj`%y<(;xX{v3K@w(MX1ENzV) z4^r7A@DKH;CJ~OOsQ7Bv`#VjQ#9P`nS$Bb-UP%YI`9bv~y>fc({Fj=vs|aYJxdfC> zgy%W-u9m}ALE+hAXb|2QR78Ln*@ogyTR8v>5wx$&Gh8mUG}@Z2q!6Du2IVqI^JCwp zYbb7SQ0smI*AQUA>5ZYOR*kXGsBHmjQ+_deqI@W|Xe!pErC_ z_K#^mz*Ioa%e6RKKTLn*X{y!E$ilre(4{rTx$apJ87E-m#5 
zEx=MqD-2)(jy~znPFlSy#rgoyVSB4dBp!2MV=iAkN%pQn=GE_X-#Rd%A&j=g>loSt zx2bII=jp|9CUnX-+hfEut97u{UJIAruHSFxfPT43+XP-lg`zQ9%@hqMh_E7Xeg?-b||>iRS{uv3)pYE zbrZ;OAR{H)kCb=sS{@AWZJ&WHWR^H#ywuR*ONmtN2X`QP0shV^OBU%`8csI?>VEkfiwtI><11Rs zw$W_6$X}ad=jq$=uRHN2{5qG25*0?1O|?>7u^>DA*dO=SKLBf`@t&yGnn93ZZZm}A zoRA3P@WpUcz*Z6GEoHvkVpNK?f&Opa?%jZA7w;xm(z zSu%t$h2&=}#PqETwG7W0JBVTfRaRp-Dnf)-Abh=kwVBfrD;bh9cQFgFo<5n!=zlsb zVtd5^w;*KkOtp<(_BD}rMp%z*;1A2KY-rZ)F7U$+4>%pF>_pm$Q&G1tN~J*Bz+>x- zQox`j4hSsL1u5SZqpsR{FKmsSs&SF)=~?W;H7Wp)Y0lN=teclKc9L89r7ZF+Xznn2 zS5>21-0Bv-Y(_#OZ<+D(sch{8^vS{FtzzgJ+Yqkg;!zI_oZNxFSGfCmRAxS^VC%OebvNI{*NBuTMf7#K3}C@ zk_c|%b0A#EcVr%a3bPZ%6V0|*f-{n_6kMj}-RaIUK=uN-CpLCQDGrV9npH`y^0RX1 zxIFayt3uijwFusIL;-LMaBw}TUD=W?swH(Hj?zW|V1JfsX%b~js)Ubo0iTcX5rk)=04JS95-G+O08>f%K4E{VB-XIs#?X= z*A~G9Vk{CeI`{to8pS5wtlH|v?Z1oQ(5-FbneH6g{^=KX?2Kas=bU8Xvo$XmzOKZ^ z9YVLtBkU7V|Pj-#>k#P2b2> zU0)X~vhL-803Y49cT&L;U;>KXta!yszPBXG z%+R?KGXe6h?xB4E?Ok--dYqEFS{XK888?tAO-5+n1wxP;zIdTL2jsGeZohe)6m-Yu zUEYp&eu38{3{4>qFvjGB!S2Cs3q{d zzSeOwWPca`09vU#Yu3+87wBAL~8%KMMKOZ}Wm zD>SlqmDgltPVy{(9oUhG`qRubx#+Cyx*8k7ChzU zbC9)he3%dxJKvsrR7)I^KunAne-7T?<4=_m=HEd+!DSK%?e|M3@k&6!q?1jKNc_Tg z7w7;OAEjAHkY@uNX9A{(hJ_JZpKjH5MBJ$Yc@G1iD(ZUsS7TycDzX6* zA_IvXo8`&RQa`0>>9#54$18w@#UWV=@gGbd{CNICy%{!eMHck!J#JoimDV^TjDinc zz-MpPp7El>w*}1Ei9(aBvC`w+blXI`b}EdP z+As&`PvR*jZGvde2$uF~ad3wqkOnX@l|RTD=i#}Qd0Ov~<+7aTX&o{u`|3}5BHdhI z#}GbLO601pM<jjGPkf=CKFXFHFet*&Hxcfg1s z)1kb)SjU=TL?w>qEK3FU#uR3~90a#U4_!*>`JOOud zo@A3rsQFQip)_E89DiioD#pAC>gJruPfD&^%Ns&aVYqX z-NY&yLb3+)A&|HxIXKP;>~Yezw5?)oZ^HN2R?y<+$v=G#;LMVh8(a_KQIfT;^sHSsQD~8s**AEtoeEMM(y%wZ)pA* zx3aTOF;YnqKbsB19;UBJ_7%i2I!N0`%KrdPztiXiK@_pun^~;gC$V4*QsW_xG0&j? z06w)1)2*kIByt`2`OxF!9*5r?{VJ^XGECl%cx^P+y10KT*4z>2l0SIcc8Jw8lgno( z0|4`0N8|4j>Yg#XOE_X*COH{e)&qzjPQmwJK6~?B9=muSON|8N9r8S5cq~Hz8=phR z2c}Ldnz@Ea%+W@n-^(F}+@{gSG2f2$(@rWb?B%I9FLg7Si%mkiL`F%x;=$4P20R?L z+>#f1;10NK<0lj@RRoxtSR=t^W-bHBHsb+F>zeKvtwgh2P9ytEE*Yiy!i=^RM{XOF z?rY3^cdWRQMJ$rCHw(@IKHYy>^eNAqWN}o5x|-0$x6ys2CB!bFS%BdEJ!h8&tTo8&*cy6O;;17|u8ytJJJyScEbw7cGX#$I1xL6>cxN zjm^7wp}Q>$6^%+B0md89Rf`fWCSutiI2a?9IUId(D?53G8emtBr1l=Q(dn;h`f^-c zs1wRT0P~ZQGJh(TZ4;}SRMspbHZiiusLboPEb4aTeRws^>a*L}!mhacN&;BA{MgPb zz0!31drbpQ)grq4JUeApxNb?1rU1v({-V62#4=pm-d!mS_>+CJgPdgNZ8~uZjP3sb zXT4zEd~J!O!U%M`l~zNI%Ar{2+;2R4*U%m(yM@uNW{x{Szb0MTW&Z$g9ep_WubjRF zTb~bnW6=EQ_ciY8bqp{v!a&*2s`*Dg$I`xm@h!7Hs@iqTX&sD`<^0CuXBYr==Wy@Z zxvPH=?ZZFZ>$x_gro`S?m=T;XVikTd}v#2t%1;V~|I< zJddfbY}D_sEp=iVFEZQxSKj4DFtw4ZTt_@1WoF$xBsd2n+b6Xua%(}0O?!;WBNdk4{M9mf$ML#0>530r&6quLkfAsGkcy zEJET&8eX7Jm`FH6KI#Ux;S& zw4E$2nWw5g0;FtnjF2}wJF^`B02%bfaoT=~ZT+PM+aEI4&GtXprC9^WqdRwl!1o+h zE|sNS>eo{=Dr(Q|tJm=TuuU^n}@){di8DX?lK3qH5gE3xm3FLZLu9vfi zDm;7h&g`Xkr5!TpU4>+~SkamMw!n2=tfU+gyO54YBbwrMtrU1?TZ6>53i5b*ONf88 zV+pb2SsjFthU}zo!RwKX0nK|3qj3hO;dvhN?m?*|2(B7d$&vc89@)+@&=M<@wbY`} zQ&O>(1FBiwsfgs^TaZIzpSr;R0N+NmgtW0UiYjaVGPTrA+z9G0CgrXWwofcJGY`uOe=%JAx|X45cJ?yb#TS$}L-MdJhvqoR=aOm< zwMj8;tifdo8zPJlF_rodOMPp$5mvdwQcq)M+UO6p?;&Jh(kp?}12`3=Z=hK)M@1|3 zz{PXW##x;vjg$|)!Qf*Cw~SHmwRtcWRbi9P(t1{wGP~uuYADwB0;!BfqdvouKN7`j zOUrrLM$#Go0Oy|o9la|DNsuMuOeAuFOhrh*Jvlz$r~zGva5`5xs9ip-aML%*7{dU#>Bk>lX_in6 ziDW`YmQGk>t~zJ={b`zs*|& zFvrWZg~6-P%(Eh{cva8Vw7L|P%{@lxT5zC(0rk)EV?SDiOAWj+%vW@hPqc;|xc>ke zue@O!0nY55yx>&Va~X8|a-$(7&PV(Pl$j|VPlaNRe-dA;oCY#sM^Fp#`c{XBkWUTW zzEq@5yl&%m8*d-?(5Sv1OZKg9@;L&AkxRBmL6CSqjat&=jxB#tiBl$3Vh|0%aNCDZ zbtmau^&c{^r8Q@A>}~UTLn1=h`?(9wJ5$U*XSrgkk`h4~JwCOe4x0=fY{E&9z_M`6&}T)`j9KP>9owjOy_1N9=Gd#N&z$dMIL0Ak+xJXJKa zDiOm1#|I>I7$^EvR|EGyG6yH_f;g;Pbv9Bh$#6+oN`nv^i+g+2FMbw8QyP-rHeY}~ 
zwJo*Et+}?7X#!2;0rPI>ocEz+IUnb*|T4?DWSR$2lA=uR0<27?423(l5KS=3 z%#HHyQ^o=6Y2TDAX3uWXGUJo|59;!AgsZOI^VD&sRLMCkPu((H6V7^U37_*Gs9u{qD_TrJJ4vtLOf#pM@%W5zb0 zrFEVj(`_|N=&V-h+)F2zQWf8{f3)4J(xcHdtsWy4jh_34C+1_hjyvSl%BLp8s%~e9 z-S~d$8%33d@Oe;3`Nw^@6YI>jFG1+Wsb{L&CIfBk*& ziow3p?XTl7N=cijB;@{dMy>Z8={2$Da%k@Yg}`o@&wPSM{(@?nA!IA}K<(bUJDGpe z;kBL+HnOrG?`;kT^Y2*Ng@~SJNMQ0M+qeZe&2%Syj%9RW&AuYIIKaWpG6}}_QZ}CT za#&1A{CS73QPb9{#G#1_1G{#nGI&Rru^0Jx2a#Hqm)~cQCQp{*kVoA<)mdWmFuSq= zz!>ZFrpgrKsNkPWRLHJlrn9;++P38@5tDBA9ZoUDSo;mX{QMvL>ZY>Njn12Hfw%7) z${y9Az7Ca?AD5AymCDYD%`|XU;cg^lD8p$aF+VZje^P6!wS*f;!!SfA4$bouw=J}O zB!AfhSCVH zCYNA|La0RB=3}AiYlgqnE_^|#NpSDg_(b7w00SR@{OHzo8Fg7Ag; zD2^0oYU7Qq$t3+~T5e|D%_Vsn#c-qm4&omSah?drBc*1%f*qw8s-l2540gq8+Ryf= zW`MC`5x{(46SSY{SlW<(wIl#GUk+6E6|_w11&C{tpD~L$D#VYb59mKyv3od31VpyP zBptm*26OeR5QK^n>V-MLIqm*SepNW{#E3~7H2Eg*6tl1sap z+I*52;x+5(gdd%D^66_mq=jUmaS1y_xp3Vf&~aeA;H<$M>=+ zeT9hw70q3tSebSfaj?HR+z;`8z>Y>etI}rDCV2|95*Xx=h-B&JFzyFj9^6+ktmrOo zok#BDArmRdEy%~vV>$ITRO&S=FsV-a7#<#;eHAB5n1uQb%&WU+2HIjcNrRzsf zf$V2z`)mN4XKqV{JgN5`MS5_LGUhz#%az-xOS|l*BMFaw2yLL1AaJLia!QlzqrVkW zHj^bB=WtaG-GS@Uwc6E>mL_7e#--jOdD)OR{%l+R16a3=jTCZ?%d>XoR@xML)~P44 z%_n7ZH(J!jn`U0>1zau@e;#w3V1KjJbj^7Fvv6+iE+%y$iD%oM-K)0M^$V{M>yLPi z=G(yJLXI*pGtZ|SXZ_Rt*2fiiyKiNTv1Ka3n-%E73MUk$%wMqm+sbemJi<@sn)E*i z>T7=s+0C&gfrB6;ml^p-;5z+l&veMQA>?g8HhsD0^BAq~4PIN@c#6o%OmQQXXJdoU zaz6o0tL$W*o~Nh4zi5c>LFJag^e5BlT`rxbOLrVs1luLKA2-TIPD%Va))K;P-<+Z| z=V%-Q>-4RyDl`)dc_wI=c;p=LYo143G~C;pRkn`ZbeKy|2IiX|!Vbgw=RcpdaQU+yV&UxFDl79nS?yW3S>oE+HUnOT9ihdA#_flcFYhYwfa0n^i_*k(AxY_;Qeal%btzWC$Aw-F?2JE{j_x8ZWeB5sY zzBSV1mCTn3bgTgRaOGG02j^c!+0Jfk?Cs=J_PvN6AG+j|x2Im7)rDC5*E(p+OA&ac z;ypB|X}-|@TBwmucYFP7hH0mZWH)5o2{Ex$?9O{1Q(fM_popzVjow){<#MHZ^y^-A zGt4GQt`)(Ex^3IeRDcNg=kzrZvo3c`Ul8ba_j)6%AZZbcf{F8i>DIk-;4j40@m7d@ zH>nZ*yQQL{78vvG2s>p{-Ghfy=m)XKkXveoNM%dZXs3@PEN7P$jN0738}TK+y`}FL zc5UaP4i(dT2_wgCzz#cZ2iG$=b3`f2mR?8Lma`2aE4WD5#BPF3o3_3|$^5!k&wsSh zztprSEHuyF+}uij(;}t>aUtH>`t%07+y4NFQtSFvrS6ozba#<`p%sb3HZsac>5ld1 z9x~JXQLjrS#ofZ&+e8>V3G;c1duJY%%MB!Y6;hNmJX64yR*=m#$`v-D*|rqM;6Pj- zd?@GXR<9>V)ov~=&?3gD(;&bPc-iC5dt%G z0D?MZyV}(8(`9W=WsYcWoD$~%M!zssI4VcE1RtTTPYhdW_oe*!8Z|4mUO|nl8!P#q zwTWW0-$*>DJeCpX1mg%5aD?>W9D498vhY@%hD*nq8D;Y$Ng@n#frD}h?Z-8gmu71f za}(kp!l^t-qR%dq`^{rZR*?$FhL$ztuU-y4GhQdAMda#GH~hQY#T@u&q$#{U3m$UsZv<3Fwk=UzeaE8xx1@fDT!lCaxBs?739YzPT&b+dDf zo`OKRQP7-$&1*uhCX9;oH0DPKso69arcmqUy74C6%1@zO)U9mh)JB97e(BD7914b8 zriS5dXI8Vkg|^8Pjih_xuIcR@%O95U##KuY&C{HmeLL4@HkUXO`y?~4OLDBcOQ_sN zcyMqJ)A+v%;{0Q)oBN3EPUuz-^`AFWyFbdhuNYp*p}cM*-yFr$?*cyWAI#U5+DtF5 z_goU?)CS&p`twzFxk+0_Ye16QhB?FMFOkp#fImZ2ZXK>xMQ@r$Y-I2`IIHE{qmr$I zh9u*Yl7IGNS+@WgScN-C8%p%fYa)tM$f8Crq9tMpRp%c{>pmSBC%d?STM5aG6c5e@ zKqvAw%S{Ze3cIKTD9B!GrucU&$EjU|Dy)GOkyzu5l1Cl56+>l@5+SlO1{)i3A$tI8 zh`M%1aEEZ+xE{DQ=$P-`2@t3xXBikc&(^KRuRV-wv*kmU zQJnVl`c`$6ete-(8bh8wm9eDFBqCV%ZD(z~996VqEjBlNF9fh^(MNA1Mdv}eWO0ql zwOb!t8rQL%Q^oeOv~3h_7b@oui8j= zHXs|BvBA%=_cesKG>*qymhxPGqXOy`K2n2#J#cEeTbSiyWF!<-!Cw1n%IY%DWw>t) z!-gGxm01_}s#uT!Qg=6?#c@d;F=gyCM-nWO7GJ)5bR!?FQkn^%kn^}H!h+nI)MqL@ zq#+QEdM&z z{QFe)mUBaE8c8Cpu;t5rcg%jZ+(ZA9_NZ1w&kbK zqB-3<&Rtq4A%;07oxvE4WVh2DKO9$MqkUE6u51ceGB(QBUGBbjw>yJ82=a zw)-SVpDGmukINaZTgDb5^4?c6OTB)0%K9qur`X{0U351q7?s&cQNc{tF?^BhvdCRz z7W-XP=NUL7{{Ut+g*i=5?cCmRPCTg|KdD1;2zd}Ril7F_{OdI?qLq=+0*23Ae~ot% z=@Zza!#3=0-Zv|J!}6@J_(rWH7ZVs#Ib%C>o-@z*SG1c5Zs545J?t7ANxWsTwH1GgvtI4zJILAz$XsHb+uWp!x`wlG=g38lF`RCU zW4S-pvgevcQ|AUG4#%jcsoMEtmM1jZwq(I4anC7kO<+BNe;?5pyt z5z}@$k81aQA6U2XMuTgp+n`jGk1-ec9~oi~wRncNb{^g_@`Yi8fn5i|{b1?sHI2;0 zZKqwz3Hd{0boztMZ#s94(ajlh%EzQnY_Xt_L`lXD4Nx|#B#O+7xx24g)xDNA11IMG 
z=;>VD_PZPmV`6p=21Ws{DDxeXvCsIHD@#368Kfplaq}{%!BfZPI@POdYgtw)f<)|t z0rQQ+*NoL|RbkYiS>sdYB01-)U}b?Go|Vj6-8H0)spcek!zkddu=cM?o6z!WYVEVnjMk5$PnPhGCFSm0MKdL zu8S718NTGKHj+Dv+~-vu%^XFvA8MX9ftmTs1;E?%&2(0}2bzZEvAd8XepLPyHJ692 z?I51+-caoi-bn{1oL1eeU+C7G*9_k=7$f=AHrceaEJG4UZmip447{SApGvkbpil?e zTaG#k=k=Z7!Xw`%2Ri{jjaiFN%t%|z4S|qU^{iV(!a8npK3q1|Q6iK@+)$R}fB+}? z)-Cn#+M#%0#>|7$8_4zig%`KtA#opKlH(m1oq+yQboLd-{1=g~z8vWC#8K^!nmodR82tUf<2dvrd)Hs9 zsfNe^%Flr$n@=GWt`Fwl_~EaItp5OgBkw3L-9*l~)F2bEYiQYB8Z=fUs%O4C5%sE? zwULyr)>nARK_ZM~jBYAU$knPfe(9o`^QEY3O~NW0Ik=HG|1Xpi2TccmQZ*lLBgNLw{9%t)8#<8MGiq?IosQlSo&Bnov*P<}dx2|VYc{K>q2@SH5Z^CS zNdQrzd^Xp-VJMGLmP<$(f_;+SOdZ|JNC6oC0AW>+y;`y+O}&cp-`?GLlEJOqPjoKO zh28S2<$m!Uum{(+YwoA8y3=$R?zKj>`+JciHHsfBj-zN`KTs>eJR|UPMexR+To-$+ zff6_*sZ|8Xc*abd$jNMEXE|K`DWK|*_@l#dE5;<2+!T=j2XNcPdG+gaJvKY=wE1^A zPY~-3acLTPK4TTy%+2=*>J52ci#4Y=vMd(j5Wub^$sm#r?0zTn743SI7WWo&NBg98 zW+M`hoSuiJd54U2w~9ILRpL#^Rg=ly(l5!tIqCAAdiz#X*|k{TW6rgmD(OwN*|^N$ z1~MSW2e;OswbP#V0dh%YRv<3yf1YvqR*2Ong(Zv26EBwAWIW-!0xOTdzmnelllQSf zyFX@Q_5T10>sp*G+jG)B3HX`x&lBBvTIGbcQ?pHGta%aSPMRbtG8;>s5 z1lt)N0Qx`$zodHi`qu_0mX5bQIumsx%VU@Ljp2EGJE~a4aG_e_++mntxKvjM{`rwV zI>gXc&TUHEx)0tmh1@bhJRUpogZS60d~?&%O%q1cKrAg~`!hS7vZ2loKBS+(4@&Wo zr_3H_j^IFn4rUy(f;QzxuYbT+_&4&XZtB}#N-A6%XJ3|#Ef-X&S{Pe?tF-?XAv$k z4$#Are;@IuPiqzIK3uV^v9l23OcTK*A7T$V>`BHdd0}gr{K&|S7zQzbSNe1K;;q9P z5CVk+IpRT_^NQn>Z1iJITFm+9_KENeiyS)jpCpeiW&jpt{_!>Bx?ZB!aI={}cWufE z1Lo%|)1k$EyRB(fdaZ=^S36qT2T&(i3R6U%zqp{5VN7o~`j?YWA^4{H}TZbx`EI`K3VbuN=#+f|W-cYOe zL}i(3ZCd>=wK?vTlxYswQAYzOY5hN$snXSLtjLv1?p%*b^ir}o7_J>*jT39HmNy?m zP{fHWQmz;}{na1tu>NFIZ7xYF6#y8?2PBjEQ=EbcMZpJt-hdx^$OtEF802r1vhu6I z=dDZO89a{==Wb7yROIys82+_Jc|5X0<%nL7)K%{Wy368QVJILlC_cEzsD2g=%v2O#wPYmm9qBr;8vI4#Na`c`oVrso_( zDx9`PbA#H5iSu|Y81^0OYWqiUtlN_caezMcWR-IJO6@7!RT^BL%!_NJn%Krd` zT#j#<mJ6MuZs4sNj1Ex$08ncHI@=ZCyKDgCj8>GQJKqnPGFowPtW>t+v>)&$v6>uN z*Jzrb*^nYd=D@!(S8f5w9e*0xxi0GqB$*|9;Afn1TqIXAN2g5`bE$QWv$xcPkJlBm zqU$#|`hB~&Mlo)O3^H;EKgf#blrCzdbT%#|c~)cQDN;6`0N|d#g=}4|n6?4=RS{Cr^h2&Vr@jLEj{7pXJRhr^CKrJGXmBV13 zae-X!mvoxk09!aqg&(|x^RciFKU$8;^7l`()ftvfDd+?zcS#;rG4Iy-4uG zb)!$KUdsz2viytyE$M-pP@=9=s4@sHHT^}BO^QO}F1P`ID(C!B;uv%p^(zfMA(rLx zR!&HaoaY{`&suHQi|(~}BwIuFRok#C515MNZ>`O>-|uA0xW~%feaWuMbnP#A;i*1l zp_vWrvrH$ue>P31OguiG)?f`DVxvV!yR%bayUfAzeB6nlEa09sXt!JjjbnhL_ zc1@Tk8--GlU1yICxpRY_)n?jvDnlS;BRjKIZbXXfzXUnU`DZw+k~D11@g5h-h^S4& zBOR(=1j-Xrh?Eiy<0{7+eqs7?OXB$AZB{jaA1xz6l6&{8%PG#CB$BHtv%UdtKmg{w zI3(d_d9!}?&ui4R2x7Kv?S?`DU;e#tQfj5OVQ$O&&D+aJbG1)T&zyZK=D*@=32fw; z+$5IR4t%lteQS`g)s-$q#oO+>oSEfT>VW;!{{Vph08>(_#Tq)2d!0P@cGfp53A{y; zY=%w4DQL>lV2>_f{eD{{J#srXw5P!4Qqv0y* zMe7}M-p*K>H&;!COj$VTo!JC)jxk#>c(UFrh*nqgQyC4?xYnOb+9O*?lw$w{KA7uO zB4+Xj)&*~Ur!3QaFanV|D#u$CKf(8wHT^yaf9(kz6H`?z|a@%9BDOqh|R>Bo0JzF)Ygk6uYe`mc@`$OSvkCdKNc9(P66g=dpM>zE6Tw~BzcYh!H zLXE6LGVdp$$o~Ky)#YEauB2l4aV@jBGcLD#A>ME>V~3OV;}zcOx9xo>Smw#vcH@A~ zJuBle5VdKveV!ht?qWYux!DUO_}}+-1Jm)WOMBBVk{<-gCkLF7_;mUjwQ}z$1xOMo zB%xgAoOPtqrxHE9k#jI@(g3(5VD%qPKBQ8Nb}mxWnBn|mKid3Xe5(t(EJ1w2Ilx>W z(ygp!EjoM2b1TOi5)_fU135mGTF+589wxlKbv|1$$}!*#*#1>(#B;}I;rq`mWLT~c zs}Uhxs7J~=bv5-g)uN(&wBNksc*VV~$B3^*oSXjuvhCUKWf?nUXN;T?o=Ec3=m8@I zg?4ht(8kUafVjg9epL>WrW-vyRYt&AnH#e=9B0@lA53~wvpcZ~BJWbXZ#k``k&9wV z$TWvNe|MHcfO#3O8`dI8JY_6)@%bpRWQO!9v>&IGBqDkWktz@w=@-dv1h(9%%|v(u%wOWQV@IReS{n}*DUWBqd+d!9R2 z6|3Gv+P%v&s>V+5GqRkM$FEwsaf1w*`4xj%#ycH!n(aLP}Tfld10jA>QP;) z2aX~}(AZ{R9QQo`04n6+x`NW{?CQ?qG65`enl#(SwZ3+N{HF7K*uWhh(?5W%CX+H! 
zvF|!R!@WO3wR@;lrMV?pgolmUISuLv6}fFMlN;LL{H?hZbR*M`<;T{m_3q zpfXlKqI4MC92|7^!OcmkCy^|2j58tmiktzSwdYHjZhBGWwa(VxQCJKCBobQ9^KUsV zlD#){;hCfEYKU(rABof$9W-?h=Nfe6PKm>9z z{>yRr*H5C)6t>YZLJ`PUOr67=9)DlJS3OARby0er*{kVy5Zr*0NK^#|GI|VUdChTp zoxQA9b3rh4LAk`If`j|tsp7jm2H@+uCzU+E={AGQkca!|IabepJJ+9h_g|XAONjJ) zQz;O-%XM%=MH-&C2djJ56q9FM+U2?C9w^o{tr}^Cv03Fimnp#OjQwknwDA4sitd_c zjp4S!F=;+r1>7^9{N!<-L9V|{@HLuT&ws5*bql5p(b`1Y5l(Z-6~8!=;z1;ljk(yd z#4#F-lYl;?pT@SHAyQUH9B6a9vm3(NL^=aW6}VYsBLd=7t3bdHxSyKBexk= zZr_!0qfGNA5l525;B$)WbZePy0)3t}c#G}aPS6+Lyc|rWQ+9gPAfd7~yl11ghKXUfu59h zC3(z|5x?$^RC9rXf2DXo?Qx}8+W2b9@tj4dJdF%dXDsRn%V*FA$K&l@=C-YH{!A`6 z$S@9i@PEd>HvObLay(<>`YsJ z;TYj|?Fs(*KjT_gLH0y9at|)Y98+xIh2u@*X>R_QIaB>9E?E7*+r#wA=DpT<7iA($ zb8ic`jARp!=S@q8S!7_NV+=O)nwH{4OLRb)^4APcb5v6ym>e99*e8mFiy3`BWGuZ$ ze^Xli51K*Z2wh;pk%GsdIL1YDw+^mV0{p5tBlwMOct23`uaX%t(;&Y!E!@+T^oxKM zmn+{V9P#g5)~P9bh9l)x9C~KDOU79v5~}sWkep($ukBTk76nlbGFq{NCZfK_J#!?o zHUwe6u>88<-a<+iL#`U2_eHXw7y!9 zHud(aS{gbb$tZKk>CHCo(>9Vi;bc%5c;x=IV@8o#VVik$Bj??N%_$@iW|Kg%{GgJO z!;qkL`qixm<&~FvAeJ#M+<(hJHn9AZ8hypO2;wO#WJl3>z^Zc-VQ{`%vluT3%8Y!j z0ngJpA4=xAS2`BBb}`5Tf(`&Z>eMA8VYu$jGoIC*ad6>&>hd@r;a42Ik_BP3oMRpS zwa+wk$70Ttc9yz}TTa-9d;l}Z>&W-%Q0w**SW3HJ&W9&#i^lJ-rBjs_>NwHSi~+TW;a#nUsUL{!nWNhk z)L?nB9!h7p&*k(rz{eiR5=n6!Zg6&-WTxB9s3w6ge!|OgSw78B~Wtl$CRFsULPrKTyYQimQ1$(bGg zc?YL|#=PfPhD#grha`1f<98KMrih%|I%{7PS_f!imx8$JM?YH6mtD8>m86tNM{qI) za;oAsV!0XVjH@&AV+289VsM?NLqQNaGktk0%8B)>in< z%tS^&VYs$>H?>HPULt(4fOE*HX%Cs3Fzd5T988LX<>hFDQJz@_`;}EAJSH<)24NZp zQJl6Z9{0_N?IU(E#TG%#%a^hUSC9@9Cf>^Nt=Zum4D=oDjEJlPD zDgpU;tCrV-EiFp2tgRUUA7W4St)gJjR`z(j)VEduDa(P-)y*=_JPBk4%Vmf@rm<|J zGhL%QteN9JwbJfuWb%rFK*mr109u$i8=76Uo%{_5w;yOwaK!PRKgOuuS}nMVnLrFP zwHp-e6fL^3G3{prk_pJ=NBR01j`SH9$zTaqz+v99dhASDPFN09hZ{%msNmE=$l(w% zRphrddF>uW669~rzidx^*e1(T{rBpF$*3E3BezS706!d zXqS_(97x0Nk6N*9Fo7NZ@0JKd*12irmPw?4KkqR-ue}D#LgKk#t>pm(NsRYmI%c4q z<-L8Z0w&+$13GIPI-GT~&c@=ldGtUw!$zlNi z0CWr!RaM4WVB5UsA~ zjl}$iZy4gV;s$%JNZf!n?@kFn%B$(Nv90<5;Dk~M9cjgrCXDG^x;7VcsM*QwNpPlQ z1s8!T4jUQg2A_8r`%4j=Zs)crkZgg8;3KPLS3GAmz07QT{{X=~K@-3>?xS%_$zhlT z`HZSTXI|2M^&NTvUZDld7k2S94zDR8Q~~!30te%kKljafFTlSnd^Xfg)XyB2TA65V z)ZmjYaQXiLYhv-oL7t+!v*I7Hs#)5qB*zmr(T3fef%>1$zGo8;-bngfG{1KfTKaY0 z91zM(A((Ch+@4S2Q9*F9LnX`ZU4ln)4mTlOfIah(`BfFW*{4RzM3PG6k&JZ&kI?3# z)c1Xo;us}9d%(aG(>MqD=klznM)HZhS7U}5i7VZrzaqs)&Bs9HVtLr z?F0?a1$ANpo`WdXdz9PdNHkH;=Bq$8T+Ebir-|$l^m7 zF8BZ*eYXSkucFU+}!ucIRFy4s+XzeP88))S0ztM zu>1aLA{dj*RV~o6)^dExNhrvjNJgY2?M;aeMaj02uC^shMh zIU25^t*{%Jt>RTU>GL-oI`jHcauYPN~4M_6k6V<*Gc&F@oiPPMw+h1}+x7?9_Z^?rH0OP0Dy@N;# zudLRV*AUBb4>64O{{R~KNiL?-H3XRm2pL>Ar_KHV52x0>emgt3<1C%0M@@KZ0rMHVH0`VIYga_Al15~;et#uXTR>g4og9U{qAyuEI z*KgrDuM)KJcZsx#q>61)?IC=krgAo(NIt%q>?*#c;+hU62t*+0S26zOHe&vA1>|u>ph0*7bX?E9}q%I6k2#K~YUzb0fcRven zcj@(G+Z=QC0Qdg@_3>I(w&^0| zF*EKb<~z`_>P8Z#0)w7_8x&SDta2e}9W{?D1}bN-g(?{x6-DznXT@lm^k@c z1L;iKWO4w(#{l}%tuVj3?k-11*0mtKyQ|B_v+=iS=e{$=WLUu+mE4xj##?;h$n9O8 zi(uBee)I?>07%70KEKMjIpAn@8_=isnGL&+L8Qn=?WMz_q!R6S3$+-x48F9~nV#cQ zK^__6A};wTP7mZiQCULgY*zC8r)cBp=~{MLs>iNc&kSSAA$WfO04NQQUUybDEVp3s z-8H)Tg<3_}GuYOmxllJq9I3%!T)niCe`*MdhGn@JQP>`p)7!$JGlg9518Hvlm5kCy zL>nxG+JX5aZwDQD2l~_Pget`}V{EZ9;GylGt!Mp}_Ph?HD9#HUHhLy!AxLhgH(h&B|zP)MIBl~Hyja+ z`qTlQK3iRmSRL8H?Nf-_D|rqSZ0JXNlgSG-iHr|3C68fKe62A+K@an9^fXxkpLZ&l zP_8n=1gPeyyrtV{I2%C8?dwwCskhn(%Z}9yQHh3m&1i?Add^g_3Q86wh6;?;pSL`d zm&A-u@TkQ{u$V>|qGS+i{++|A^A8+XOE~5` z_Y7nTSZ8Ck)Z-@}^#-7U*yLqN^zBC|%2x6lnAO;H`@Gf2uNi?4oB}{6wMR4?QC2)J z8K+yo%>Wz6T(Lc?7F29myVRW8vi!=Up(CwPSy_}E^%?uMHS}*Bal(brH&RYJR6$;GCpncqQKUz|f%Ge4>UikLSO=)h6BxZ7UF~-kY 
z>$%J-Vl|U!FSu+gkGd;P(j~UDL%R&jcJ6^Hw(zgHDghq%ZqQyy#VKu zJJKq}a6>aN+^SQ9(>xLVYCSLaI*=}bI{G=T;;bM=BAcx+nJ@ehB%2-^1_uQb|mM$TDGz>GEFLvGyZs=>qewf z;@O~QSvRoU8~9h(@a0VXz48VBP?ice71%Z1S)hbj{g9U zKcznQ`Z%;$entKD{7__dakRTBk%`<*D>_Bv5@1fS8xOc01s%`$t7AX?}k}-_187K5P{Aoku0S}?y-LF_5VxAi_g#k!wGGFU zID-p`9rhLtj({KQR$X2lxJ(Wd6WX*giHWFBaUPqf!xST7iG+ww2fJtbd)F86W-#|2 zAl_a#mOFW4*mH~)E%=&3P8jFDzYGcfLUS{{Voj%jc9`rNo093r6viz#MWtM_|WgglGT%RZZ|IH#w(S&xBkqXp_VeS zTrZfrxaTB)Fh|r8T~wv5jw-b6v5R+Rl3TJnHbFf)V{pgwuE)ThC%Br#*%ngKpO7d3 zILuSOdUcTIzoB&lk(F>LT(}Gl>aM$aWU_ zx%_G=Eo7ehGYTwaSy^y;pL(|hPkK02+si3q(Rlv=5&6@S+EH}2$QEalg=Gh+>5r#+ z&HK6<^K%@gwP^P81!mfh7UfVnR@cE_8p+^49b3sX-{^BXiOBnyBz)U@;J5xf)u-d% z4jUah+HzH;Mdd-~7|nUb<}K&6xQ`%)T#`E$rJh;IFEBsj}`qvBa55_0Nz6(eue===7{E$c9G2<$0*r2_D zr;#G0`;^G?^Z?^H{$HJZRMe@}a6Yb*_ORwhDRSOXRcP6WQri=PK6ou6&roc@aCww+PKJ`H!#GoA-)w zXmzaYv4xFRdxCISX9}f%`qb@W&f~>i9n`coXSkaB*5cYHQa^Vgh_rYa>*hwFf4@bv z4HkAvB30SBk8;BbKO8sZRwJBQTgvtTnWNbv$+Q#ZqpXXLdXj*CCY=ZD=b;n#;bwf( z;M?JIpjciq5bL;ZTyp5eTa5R{N4KSY0pQOO$9JX01nY^`AlPht+gk(luL0AdveW!; z;?ECT<||8mPU34tBpt9jw2_<+d5O`u^%dz}4%%#Of-?EAxmJ*qoNaDDBB#@u`gz%_ zqr;^6U776gK_%>sBn~5uwz94V80%I1NVc|9*xNMmVp03IRV8+)$j3aE$A3?H#S+N& zj3b=J6P&3%D+|Ti#0SG~iNtps3tKpBgq42j_2{EN?waC~Jy^>_lh$p2;U&~yySfc(~oLyx1m^Sb~9PZt(K4li^}s7Wo&04_T%q${CT6py3z42muqu! z(f=%KpIC)y#+Na>|i24=in9*kZl9;}zmmok-Quo~$X;oi68$=(o-(eaMEVA_4;_DLMEY;WI3G082D<-;Gsu^C%AD}o6F-oBssweZ|{#LcMbu_d(e ze2}VO35GY}iT*9ykEd+c%{P|v*t3R}F0K5}zOtcZBcb|MP8!^V^s&v~AXi)`4auw> za?lu-Q=SeEde>7TEUtb-jl2`ut7;a$d{PDE1D~6U)XgT20o|7zZD8FiZo$RO3XHg9 zILRPfTbrv88ZF*it`D{WAJVaJtv=DK zeXKZ+Db-Pk=-C-L^u=vjDo%lB$x_GV&qL4Y%|onOExo^&zzF5l{-YJ4k%vA6VKUoE z$=ET2?OF|UD!@ZBD~vMAzc$h{_kA;tX#I_Le7QJum(7Am~_qrF9G zJa>{uE4Kh({c=t}TD&RJdZs79>%CTS%Tvsn~a~Gb^9+M5cG5-Jq$nt+$ z#=6u&a*Y+NUH2q=a5J8Vnj(q04aLHPDgu&5xfnx?)kKq#hv&QErjGJhS%SzgNB2is ziS8MLGU0R8aa&xX+)~6t2AhoMaUABKEbF=@jg*1eI5n8kd5*^eAaXkcR&BPTD+D4= z`5(%qq%z{l43oLza0dY9qmJccWo1q=f<65yf>2uxAIiWFm$f&{SCJMrT$79({&gua z749=}DeB~YT{BOD-)_}(CM@BWtzC( zakCv1RF=fHH#-npErHsla3qO0FlHwd$f8(MLyj?!bJyunA}n9J(xRD(iJ6{Qx5mtQ zSz{r<{DJ(dM@XOTHqlJO1(OUHoF7Wg8p2zBf=66=GZp}M%ChtLJARda!=@#@y|fMv zF>x5d=RIpS+c%0NytG?4H!JeMPFxrJtyE>yt)`t&&Bimwr>$!0Q6omM%O23`9nrg* z#2ydaUP!J4Z=9@5Ffr*>8=1C=O~ANP-b93L$jai0b!lzl+SbK(fT#DX^Aqb#n|y*N zON)Z1g0hTb>;7?3y|GxOcb9(EB$gcyZq*hXY(sIV&vhs!1Q_kQtgN#IA0&M}fBPDv z1*DT4DvV!0+uPhLhiRrvB4-#iHl3%486He|#N|fZ;;Y!% zK4iSSlq~MQA~KFEN!n0VL!LS_4r=0OCP#NOP9sGus!3NX!Tc*e`^bjSjk}}_4qW8@ z54A~iBevbLD-Fee8qSy-iHu4^ZOGhvnq+Ue>tD0hujDqrrmSjR?S%0t8=K4%24XqK z-6x;}o|Wig=}P%b%PTZ;D!he&h_Zl6jN^h7jw{GNXUA{&PTEH#q_%R#h`bQYo&Nv< z`PZiEHt{T_C4b$@rG9Q3aX8~2QhtZpyqfV|(D!IRaHo391d;hLs>e4fa!D!)8T~k} z=E}u?wjO4MfVQ%V*d&CIg||+_>RM3WQpc_{=a&;(`HG%%%d`RJFe7lm$o$QEuY%=K zqw4eAIh4zCQA6bC%8Dh=1J?`x00ADgbYt%mUENsJg3%(he>AFqxRWFA5533ZUVE+R ztsV8qX_i-s$Wp6;f~)+qw0$eRzMR6lo%Xp>bL+-9u0K(|%#unzWob&W$D@Dsg>~JW zZ5;SJ+8AXd6=q?L+2}g+?Ni&&5Vs~xuBzfvSZDqNQr%tobFhe#c7O)ajAx#HwMs=u zA#XEw!a(DpCbhlENfbOnP3DEHJ5A5oNqHHt+h2@>)9(y){v+#Nd*BFOR@7B9nC+D^ zt;ruZKb|YD@wK3D1?yQm&+hiI`EuCX`E5n~Nv>z%p;d!ZV5US%G>-uN;&)b3e&FNr zoh*su+&s(3Q;@mqRd24=R97llS18B^a;F^obroXpvs}s*MtW`sp19(xEOE_X(g>vB z?JB2f-dhBo-Q(i8H9Z7~91aFf zamW4g2Ylw;&DoYgYT%h5fH74(IOp>mR*bTw(7a8#m6K}#-9Om}9Xn^zx@p4B?BS^^ zyCcryw@Zjw*+%RK!5A4ms~b>WjuURh+my%#y1ipf{=wBhb2yS2VMazdVxfxyj5=f$ zVc(@@+(jHuB86ud&hJ|4i#X2T!85j};rn6^`PKf-ARRy$$L0Y)@EY!{VH$ak@HSiq zh#Y;;d#|CdG5BA6Y<0M-8*1C%;x`<8vJO;_%#We3YQEBj79|20vaG=M9)MPJe(}8| zLwRXwtm(4PBx+}nZ1VBQIqmDkd3LkliLLA|<&YhaG;QTK9&~&0yV%#TrOH^mcGkA) zW*nQD2`asL9C6aQ9}(&n_cl|;=W%2A_Yz#^uh1T~QIS)MApM-YP`(uSh?;fO?rg5W 
zfwJEJ08Y5Ct~Cp2v#kr$?Qj_Y17@?X?IBi!z56=OBKZO zI!UlHm1ayT;YjMIJx?_@wWl2p_T~$lh}~8Ameq?VB$Xo|9i)tmfN{wHSCQ=HO!TLJ%md%rK2FW zf7u)y@zesfX5UtkqJ}n^{>+LHv}|%ch$A_!bt^qe zc;}H;*5UHoL5wy4`MAozC;gG>T*rp>xP+Q5+_T7&qnKnuhwi%UE$_=G@s8E>t=tgnN*)=+f>jJoK-hgc^sk8T%Ua#sz+xyePn9si z{(fE^0k4Tb3_UmNF%^@aK{PVi8#Nm{^^ozpcTwfz+X%32SFCN)n%mzam z7dy8R>(ZI4+S^A2`dkvM?o^AJQ1QpTL9ApS_q*3wXQ}@HV;8(P$qW&K(n%=bfC0(;>%#nh@k-;w7gumtnWDC7 z01g{1C{i0g?BtF>uTG2|?s?T(N1RI++0M*xILPParbe#J6$dShRhyVXusO=IX$%juUPs+hMA{hj- zk~e)bn$_@S(!r|BZU@Zj0QB6T1N^D9#TTj4UBs|TL2uoY!S<>*IPx2hFp@#Z&1p1d zZPF0L08ab{=rhu=T78VbpOv=|MPivu<1gfm<@r;~8P8nR-woQO-l~?e50c>wU+$7P zKb0_;PDvyG|O!Y!*)hl z%Mp|65A>@rYMQn6ly>W;+ubx+WpbePIQ6bpC73G~0Io1poPJfMrQbEIs*bM?0l`DY zF~{puVy?~moqt}mSf_@;Z52?V@}^V`k6og$HGN9%`thfd-T-76+!&HRhL~@B$YmTb z1d_vw#+{2q!+ALN=}7@Bu+JkMkR&)6`FW_NwUrzKxF)NXAmo+@KE0|;yAoN7`g2!d zT(tyZEy&{o-os(E>FwTjAp3DyQM(v*zQMKo$Y1YfbFylug%t~#aw;j zeV#QNaf6P&*{Z3|ST+FR4tdQ@c_H7hfgtFaJx@&jhN7M{DySC$ILJNfN)eU*MFgGW zA!>0UAOJ8&rB=6Q1*Oj{X z<%+1lR`u^o1hUC;O9H9sN<<95w|VX5aDe$hFv0aWt5$lO#dD|KsQI#CVom`Wz@`Ue z-4I}7e1<=VAFWe@6D_33S%XG*t_K^j{xva3(lR^Ao$qJM5D;;<9fei8kVf(D<%rMB zbDECdQTsxz!f+%&6fx`TS@%~g^7)L*_jm_6A4;d8lT5Y=nZvMlRa}zCAfNE|s+xoE zQ3)XLZRZ{RsyplH?QMsa;EZE_I{p=%1>}owF*#{junET%nMx(SK26sI%67)25I(ii z?X>>l{{Y`cu(XXSW{f#gxUnSe?b@__hxGkv*sBoO;@aO0gS#WRDC^E@oJ$c<%)w$ETWxNRa%r6!wt>F#mp!c32`R}1oNC{*1XJ8 zw61#CSIrw35ZRdTBLUuEhi(to73y9FxKX4oH;JxgH%dX{1oZqs1pC*XX}(HdNpmr8 zDlsO&r#p_^jQ%z0J|4H8-%8WsDYi>^)Pg-W1CPv)&aQ^n9Z}k)%sPxgWIjyLlByJS zu3J*WN94XDav8$rj*^(=iC8Ety;y>y-}~QSWwXx0;omvbs$5)^M;-8^gZWl+(j~*=HYbe@+=}epR`5cP zLO;f;%Xrqd&mtBOh}HgO+!jz2GJrboeWTkIqcyH&l~Gzp&JW|$6`8EgVlY~xZ1SA3 znH%pu@W+q#L1q5{^;bM2%+9&VMH$w1Oko~qFx<(KqF z+!EZbN9f1@0M}fNx|^F(I$>gwys()W&-hn-H3)%6S0RAdK|CGY5!2K8n#YdD3rpQb z#RD(e&|O)OPD75W25J)z*Sk*`a?Np5-G z<}sb@aIC~LX1NQSN190%7?|K5TY;MAOz3gBs?g&;FsZ&X`q(UIP|%>w_zUU10{EZ zoM%0~tJuqDcycq&#>ZY`(QbTSb*gwE1k)wQpM6vz!`g)IV*0*QXVX&~4CX;+y*v0^0RAi3KJuA|kCv%Q6 zy43Q$a_HV&s+`F%jWVT!GZEIE;k|Ng15>uXw__Z!zwn$AKD9ofi}_`^eXRI&19KEY ze(qG0{ObyK3Z;+AjB*EY^{o_ivoY59N74Eoryi*S>H5{H^Iat|4X9L_1&Z&6+0LJ&8GrU;f zL#JrL{zNU4wDe~^2|k#vCrjS3*sF5KQa>E{-ft6M3uqmp(&HOc4m{F6QpejWc=}g2 zYiNR2c5F7%5R-y_p1*}l1)9pu<;R$J2J6Efr2BR0RPJS2rYfkSH9spebs5J${cKlV zxmJfWyxrMW;pDlP2VIDq0ve*8*cZZ|kw!@-oin&AiIk)K$0|6c5aD@W-Jh2@t3zeo ztFvxF7|lhe-8G&3S1X*Of={=4mJ4y^65Db(#%i6#y2CjIu^SFX?)2_w6WBDXXs4cb za#dac%g?Nlf`9LZd{-3FxwmCKKw*wE*A=bdZA9E?vAN8$D+S5*0|5U3^{TSQ7_%@? zM2(-Ob6BORq?9!-cXo{Ad6QeLBxkw}qx1{<*5pz=vF-S~dkk})oWSlr1b360J(ReZt=VYwV0D@R*mE!ssu#bg;% z(y^nMC}Hy$$<8xTAc*^@+HaY;ZQ056tlPCQ$oL?U*0hu6+!%JqsSRO$9Csw1xd~h<5jKg)>g^E3d9kd8kj<{A=-hM@^Wg5CY(&J>$)@a zs_%O;I)Vb6VEUBs>Fg@9jc<>Hh5Ahds#L+F>Wcei7iM{Af>ru z*mkN-E7?mZMBJ_1nxAozz+uVn+M>OIL`uO9{oi<=qLM)qOG>};;YjxaA(WAk{3oqx zm=&id0}IIA#zk|QebT{ljJ*%aL9KapK+&)td!px>hFcqWo_wp2e5m>|o_{LNhSSfI zR~ZN}dgDCewiOvI!Uf4SBwAcz(5op>3W9p~%}Ft3-8$Yl;0lD2diz$8Sit+gty8sz zNG+T%Dn@qJlSLXaQd=P7?vL@Rx-BD}oy5`*p_FXoA5UMUW;tuyksi{JDZw3$TDqJ~ zxlnDv#(h`PoWU4^Kvl;$J?mqbmf4UjNSNAxyHY7g-yn>(Mtf$fsS%Y4fIgL756Zxk z!y0!JEUbriF^qG^f29+!N#bE9eqKlT(yJ2SgT!n3)F&anf4&dR>?-0zOqNAM{o!!}&E0K0Lhcf8iXn=X{R} zmu46{zHXoQ*XS$eTeyG(GZD3neS3DVaquMf8jpvpOpaal1;caAapI%y-1IP#v$Hp} zSPjvahAhAUxWa>;PJWf^R?byYEvz^|52)k%571YdXko6eVOB#4$WRv>NcF{g9F~C$ zEI`~}%Z)+gf!`f#It$%8YVLDd%KuD(ZZglx>?I(7wp9vZ!vJ}a6=xyN``eI!hw>^yks0! 
z(Y0d&w4}6(GOf3*P?Fe6WK>P6GTT*ik4jgtMkk*9W04oYFt3!}WV(!Ok~3#&EN}9C zPVD^%sy_$TYp)OLC|`U_aH|@zz(co^KbO|Dem+Sv_LG0q4B9>bcNYQa%4(@I(bKyBE-P%4_-312*?I0@Nm#4|L~h7mph$;V2b6hu{x zRZETG*o<@9wIM{UB+Q!`BrdE9x&e?5a0sfp?cKr~Tc}@U+%5a5Ke$Ix_}0C;#S9tG zlP2&Pv5s?`S3RmYdud<;aUh+*^lrwMgz9Nn`937mrPM9<{jypA0DS@;5wJ2(=zri< zg{oK??cUwcmIo{c-sj)8J5>)2Yj2>-C;?s>n=(c3jlF(^{{WF&_xk0#VptR-1q~#9 zb6VMw?0c8Oe;p@{yeAYlV>;EM7%qm!K4#D{H%{d9T{QZC+9N2epcctgIRJfY<^KQ! zYp>w|vp`J0kBCuyk>ifq>-Q`tOrtB~X&`puzLj4P%VbjDUbam&$PAJw#4>S`Bam^& zqisHb*F55)byIxDCE`6QLvE$*iB5&~<99jYy!%=3@sTB(6$-3L!8konKU`o}v}+OC z>-r>9z~X44lK@6@jx&Y;)h+sc5x9TQCfz=oM4m2eGWgJa9#n{pTpk~EQ=B3-Y40P zIw?GYeuu4G@kXSY#=CubB#im28w#a%sK8Kp=Zt=|LYMC@4zKRHO}_&@wU3A`=8_X| z5c!D-C3ijv=h%Ax0JUD<1gBNAjvEjqSYq=eWL$Yo-)a6VcLKg~Ml8z9C|Ne`4UCWq zjN_i1^!63#o)hr|Lr+Tx72}21%krZvB$Kf$!=NCZm7XS&tGHq6xE{ZCq01hh<;d?U zTdZT z2P&#C2|eqV_|K?E7lj*8w1VMHw8d_OK&zO@3P9WG(EG6VuOAMb8n)B%IK>CJl9nWI~4cDGlT z6A84&VDst6?l>prCm7EFbiwzfzxaQnc-KVpU}VxfOpWEF3nt@r3jNZo+a5`%czR;tp^ieV0RRpa_{a0EW_i}-vdQJS`{WAg^-U*I@b0RXcj3gS`!K|X z+A*GU$QbG~R&?(OORKuHXc`p)!^fk-s z7S?u_%eFBk)Za8AMhTGp=01RX)^S&Qmunp5t?I!5vy*w5Uj(-doB@XP=~gAMEbMWZ z+t34#m+Mt8>^H?E=-Z@)i#(6c_8Y!s{{UyFC)Td$ck43wGpNfU!2`Hj(nt#C*i5%B zknSgpe4_)7ocq;7AC|F~M-pu$mp*TL3sVr$NxsP`|U{%;$*f8)fD!nxK`vS4J4;+@9=7+U~F z4oL1Rzp|hELrAurK6ztJtB&MX0|Z%>0Xs`|uA{?OGA+H#A@Zw+%Jfsv{*5+b#H!@!4o7w)MIfN9@Uy`EX>%L+TIlaHsRmau1}Il zA1Z~YZ_&7z5rA-Z_N#akAdjfq*0ey(J^Q zIw=eMJo{6mu?WoB!NyKIQ_%vZa0qqzx#>~_NXAq#hAWZ|dt;yQrrq9o_+5fHV;ey3 z#-_ZH78W71=5R68b^ibwpjk;7QH*h)T7zh!JRI!+5`F6VbwDFV$sW~O*;Ghhjy~@d zF5-+Gd12Oqi5a$9Y~h$3^HDG;!G})5nla`vV}VcFV`Lk052>bzkPx{54EpAPps*Mv zd*X!$B!R&0YG@9?`5gA4k#aTMt+qsAlY#SdS`B3phYZA$x#zV~^2X$rLVD(%9nLqg zV8KscK}fiiiUbHj?bqJ5EK#Mn-di|ajySB=*veEKfHTKxxux7lw+qf99Flz~u~PU= zsP_lSe4jyDWSL_qGO?%ynRy4CR$a7fBNj&8xZqbyrD@_DR%Sc5whEAO>scAa64}I) z+`=*PZOHTmwcL*&bYLZij5jb;!K|~ zXXYI#^C8&FfK=lc`r?~)62r90pu)$A22KSh+D;5f7(aKO`wMbz!(|$E`+B zl)I1;8kX&*QIL?U`rA7uhT$btb|?2+Hi8fP#lIn5bkWT_vZ({NJXZI@`-X?a zk7p?f6tNdzdBZ6zKbKE>&a%7>Xgf^$6TpyLYTCWKmL+2VN(Oit#zlKPaKw-j;X~sQ zD-Hl1Px&?FzXE`75f;n6%!w`--A6)+cO68;Q9+dYo zZ?4bUBS};he8$)uV;JVS-DOf4T~`@(1h4YChGG?t-_o|bIiw`lsRU*je|WuArw0{EX)tD%8P$2}+z-a7O+4CZ22tj~ z$AQ<6YR%Lw+D6Ja+c7N431A21Rcr1?HMTe}+6Ps))F9Fp-y`hdlW1@CV3Ki^r^S8;D7eo%t|0C{Yxw&H$` zTnS=H1Qp%sS|z27`L((4Q~0tw`)ez^K_EqCnICdsmQGhV=LbKZwR8UfYXrs_Rl_M} z-g>rk#dH1$)`ixP`fJMu^d@K-rciqGfO_%in$U=eRY7tT5Zf4cIpBBgS*2~t=DDGD zcjOI_#__1ZEE}k*>2&B?Rg4rYN6vbCnr@?SBP0w9Na2BX?T<>&H*fY+lY)6=Vh-a< zI_^&~r*){L_Qp8Z=Pats-1GhF=dRu3whNLN9B2F0XYFN?LXDp&Axhv8?^%~h@~Rvb zz|KL)6{<#Unf^!(6pE@*`8$R_YI~pEGkIV!TyDrc2ulHCv%1xD_<=9%}Qk2v!D zybuq1+Qv*Hfv)EVao&xwoqLSneKFd;u=ss-sYT$6dtFE8Ss2Y6GLq$q3>rg{M(1ZP zf7w(67_XT;E2T}Xc!y1}wak)TO%U9;=jIvCezo>J-i|EqCbqzVq4_ogkXQ_Y57Q&l zgI-29uOmqwmJXjYZ0EHf16ka|B$^EN+6JUTMI4253k=}10&=9_Bg zP|;<$2{Xo*_7C!GQd`Ve&iOX7?fFhwa&S$0+*a~HHnqDFg#5TF820=s)LNXHbg3i( z)zL^HPA~}jypE@)eJg@Eo?XuA(7wmXw{ExhDDe~Wqg)nw2@E4rcI0CNC3EZ#ek$#} zTdk&f9nSOAp4@f(t83#2f;7Je>ru(1PRsqOf_ZH-^4f0BVyKc!(>P3CEX_sUpd zNcA=J6xx(Ij|R2lE>mwvmFJDg3aBv{WMiaENEw!4jStKVP_s@F9Xxg5OQq|Xp=UcO; z+DeyUAueT8gr5IySee+P+KMLv;TDDKervvIgiz)sdBtLZdm{ zwdI~RkZ8IR={Ah1e|A>X?HhPsynVi6e=6bc{6iIu_)r#WduT<1;~<7&H^>jJ3F95e z^sJo(-Obf!1!H$d)b4b*luNc0EJC2=9D|IHPo;F{O*ZzwYPWF{OS32je6z>Qr>{I` zp4G^Uc(2<}W-M0YYry0mFPx2}`tBZ;t!=1WSVbkY*9jt=aTww;&I2CDrFGdYj%42K z-0_~Lr)yphpY745)$N)hThBaX27T(jUgZnJ0^IwG;^+8>ZQ(s0dq^x1FG#}1$@9eH zf(Z4>;jA5I1PnC** zJC;6&9Bzf-*1Wg9nWg>wOXkZnxXg`=@$KzWq*IBFVWEArB#{Jj96MGlaOeOff0)mx z702q@a%kG5H&(Uts~IVra5IX@Xjd@0jAUBKz=bEBzO?IbWsW9N0Xz^o)5vL5ZaCaW4l#l2RPCAU 
z=4hA|AmkIqN3CcVMb%%G44*SMUMUTt$T7*wU{Wl;ZbFbqrB-(&Dd!wx)}@jTp_LbQ z-Z8}_akrQkB!D`fTCmWT3A7I|b@ZhnB0@39IHuOlf3IU9x!d(?3NW7t>k3?FKr z5zV)8#@@9~==l#R=on`i=Bg}lC`q^N2V#JTc8s3&9FDIV;AH0!2`ugATsH%pkbae)A(7vF zqvdb9DViiChs;%e69?u4t|@J0`%LKoI}o0})fU1eD&q_~ulUvLD`hhWCxtj0d7!tk zxoEcW$q51EJFl1KcJ|Q zgk8?E?h$X{^M=fm$TR2#b4%xi9FCRLc!*-&7(gVfStF-4%ZE%32mSR~#yraAvzDWk z;gZ#(mHtKTw*Xc=Rxd2l#}P9|pLj29{oZQyW6ikoA>8Bn*5#ye>EJ;YTX8PW$IMSo z{j09$F~~_KzzIXi!Q!Hk5ma@_{wC)YrL0>#5;U>rY^NkJ;VU?t10H`aXaI{0;R_xQ zUc^>(s!h`(s0z3OwOR=TimLfNsF*zId?vJ-!s-ZK2)}*jw562! zr;K1XKstVP)oZ$Nd*HCS@^*|idivHM?9ZyUt>Fk{ns_f=U}>azTjv4E1UI4Mx1g@u zQL~2NmWUY_cp;n+RBgi?W1&4Ojy~!=FNqu`qZCkFsM{Xwe87&U{jXZ)^?P{diJ~Q# zs0h6L#0u@TJs0=V&um$M$vGRpLtKWW$pWw&{KWZJh0Z@ZzGE3g=Zp$rk|&U+&$*s` zKgP4|qiAE9rIGxRk{LZQo-3l%RuggojEDQ#J8|FN*ish5c)gX&I4$ZWp5p=zLVqXX8Cs%u%ID zB;3k#oxlY>JB(HVj4)&$GP>Y0@G<^1(;bX#KTN)xO1g&P+?19)WM>4P<2>>7#dze936RR%vu$uv2bdfk>rC*Go`u6_- zcBV`b(`r)9Wdta!p?KWH^UY>V7DMudF^`!WXL`4&satSVdBA7ybv~6|)=@g`+q514 z&qW<7VoZt!o6j!8k;c~KRcWL}g@k3Y++ldBX1W3?BPt6P!j9jaW&+GfEC$n)irFM) zCB_qEQL8d%1aAJc#fiMw(0qlM0!KgR{{XE|UBnVls3$*{N|MA(*U98YW|Xojz*P)EC+-^qCwp(OC6bgaJ?>s}wyY|t$0b*j9F z$q@{Q$pHM{Oo3P)BiHpUO2R~jS!^xah~vOnWIyeYPCpFS3Vgd_^2m}*EtI=Voe!xa zzaSr_ML2VARZ5oB;XWd2v+CN-?4}tRi}`UVgnm+RPuiUK8CfmJH0zbfo`s1g}%(WiG08K_Zh$m{lkOXy=Pi# zcUJMd*GV9NIgDI53Ih)8x!@kh*z~MF7it&!mYpxy?b7np6?vpq9H`_Urf`0M{VB9s zc3Pfu;tf_t)aJRIvxWWO-f~c}I0w}AHOOi3$$PEZS*bEi<&0T8?rad==kIe@b!h;% zx3_r%Zc!|ZN!)V0Dfa*Y^(M4_8$?rHZBe5rBSzZ$$^<7rycq}XuI@dku{T{#^TrEh zq0Og78ptiI;#5$%$C?S`9*3@Y?^$wQt;}0pKobyUP&S!;+t$0g_;j1#l3c-css%Z- zhA=mj_2hsDzgqME01@fG@Q+NER?>raakVY2gKw1;*E_S*9S78pd^coY#iAQ+rdj+z zo=*-~Sq8Y3#_OexPm)w)IXq(@DPG{3_Ezwgn+A^_lc(E-vxkh2TpyVGoaCQc(fCd9 z<_`vVK51k_9rfT7yso+0qmVxn)}gu8hLVetlmEu0|RYAm4wJ%>-$yvZ~TM_9R+Q_-#3JGgfif!Tp)0Q{-O3CXGD zV|F&~w9Cx~TZpc1AI+9DXUc#hP%s961wT+vRyF3@#;R{_BDaX**#|k~vFH42OT_*& zyw_~f!burrRaKa=Ae?Xk_x}J2(YV|+UX3bK~z^sN0pNZ#SMlGy8k#cA8FWqh5gP66x5uB40`FH0Xs^E1vilZ@1B zI1}ypU3U$+?L#zhEM-RWtDF;1IoRchUhD|=ry@?Pz?P8e8s))~He#JN6OM%Cy?$T? zbXu>&%WL^AC4qKhos0ez(EKm(RMxs2v#c#8?ZGz_lED>YQiK3cvuEpDT8vJnSGI&k z^0&$xBZ}p6eCI-sx_ExA@b>E7X;Bk?dKM%R_+y&ne#Fq-Tw2S+Mlh(u8Ly;unVvfd zND&*7q$@W-Ph9^1O8L(5b0L+|L%KofT160JT-$#6omb0@^aB-D(|B%AJu2P9 zgYq(wv)Z6XB2-Yr2h>#>MYEvO$(GGshCumAt8m+!)5yW> z=}(%Gvk=ZZ6yt_o!=dhLem@0I9pq7oz)3OYI7687%m*0$B|v_p(&t@F+-}Zd^F&p6 z_L9y=PTBg^=xrsjR#+lw1D$~a?#ZtH(?ipsi5Oet+ptzLAC^u)$sLC^Uh3VZnIwc_ z>9P!HFc@bU_BE5JhUod6hM>24cZcs2%FVi!Glp2p1M9f1@4y=4FPa#%}&B{l0$P7t2W)?)Es`i*O+S-O{HGCDLKx5elu4Y5~InXOglpPPyq)Anz?Oo zyMquFk3-s^ixb6?7a-+^I#XUYVSq>nq4cdRC#j~F46HVgcXg$>QvhICoScJ-$wz#y zK^f_ay#?XAK$3r~UX_fHq=%AYhju|H*0U}oGRWkd;A5J%eRh$iI%lP3k|a>v?mej^ zNFrE>QbFVa)Oys#KnM>}-iV@z?FEQc8Tz$0P3ql<9+d8DVT`_BIXE=bW!}ocPt;VA z+%mI9VrC=hOt{+e0;)rI;){reW|L`*_WQU!DOBwM^fSK&|VUXXA4??+?#6f(gCyAyc7IV(r z)%f(O13e8XzjSbo**A!8QLp$J3NU%= zSQ|Zu0ClS9{358eHLRdbJr75j#!Grq`D!Kew<@9ea2FeM z)YLB{T|AQp0kgpapr;$Qc#d}PF^=9^2kK$RGfXs-a%n-=( zO1o84lKzX1*!=3G7bPKMyk$cl-VEpE?_F_v9DPcq^RR9uzqmi8J~om$jFIJDG97s6 zM}O4PT&OY@i55KcP;`vH7FyTsUWTjDdZY-cOukp1d>M3 zG1|FJCs8)nTM*$K;AgdMwBQ0+OKmtgY7!KQ?k%Hxc9gcpFOo6HtoxQ!CM4uzImh0s zHy28X31Y>E10T+)MLL2Djp#-)f2C0YeK(W@YNd%89R3yU-?JaXsWq)xyhlBk+idLt zw}~8vMv;dsf88I<*N^IA%%Xjtd4>G6#Dgd9?j(C|;^IjqG2gO?S zw>I)5%%&GN7Wr?w4qOB31~K%hAozuO;wx)NuCEQuNEpYEK4xyEnv+@6<<$``q}eo2 zjsW5&1MY*=f(geJ=F!8(kH&XZtmmW-=S0^c@s5oZ+L)D#GD#MBR$iZ-VtAT4S4+8D zd$DPCH|~K^oTmYfK8Co@3u?*XO?F#3UuBKoG0yyL{{Uo;-&*T6twsx-bVV7CXR&vR zMN^R*WCQga8p1RjYWW(vsnQPzTBeI_Yy=3YZrDj*ssKRUk6wCLrAw|ysKBN?kPg_? 
zuHvVzPrhr-)#Qm-EP^+fl3jjmjmyXtQ~P84ECFwEE`0AbqgTLkdV}aI)>SQ`!5FR1 zlf_e8S~OO&`G4toLXtPjD}blw_8y+*yr0Edq#8fxkMncjCUk$1O2Ym z++FG?#Z4TIZz4UTNRdw?0<6VG7!Pc7UPrE2Hl1g270jmWyqK2lIORr99;EZ1P%CvL z_A-(=9bR{qXoQ1o*MGltUN);4<~2H(YB5H59-81n4x;-Oi%zuWyiSTBPZ{e*L4L&$lCnQ`;vy;0E z#(RJQW8WveWmZ-?F!b-KU&ZoEZ)0t12k*Sl5r%MCbBrHBTu!%WHCyzGXAKl&w2lux zrvs?wyAKmG-`x3ScgZ0NtPVDS-K&z+#Cl$nHQZ``(8+vni*{Ehk{$7jVF5t`BdmaC+~EX0l1bk>^}6 zb|rhL`Frufe2(}P=Q_5zD#Bjo<)mByRog3$_!zB}8HA*apBi~-e>Lh5o8RS30-nCk8WI$9jxl@o2 z=U4ncrJYM#vbB8qO(yvP;OrlJujNZ?r*CcF!WVfXv99ajmxXR;@lwq!VeDj)qBG~_ z%xk>h9P!A{)DE?UN~$r`+7NP;l09zFMQ5dbns1a!%OfKWv4Mn4f3?Pbl<2Km?IwA0 z8 zomDAH3sZ^l+O4&*Lm=}c=(zOhUJI+)xM=2O2(2L;9uGL?y;sH>#M+J9ubZ|d-M0fc z<2B}5yu04s922oVVtrWVyW-9!VcmIBPVCKto%r{v(y0Jr8?Z4~?ju=UU#!v*l~LNF zxrIW!UDd;G{{U;#=}yeGB7Y3rn6;}}JlEXp6&+3pImhK*;iGt*YLZ4BkK18iFMe>d zCUT?j$>zKUUDk*vj50{4at!b=b6%Tma30u1 zgbj!44nDk8DH*n#a|}k=+Ix<3QTTsVU0&U-?iED+8A9#zeaF3Nn{fXCBd+Mk+$s?*q#;92WBds*DYhERODlZK6l-EVdFuQKrtmKe2^@T)o;WtSw6P<<=6aXpRO zz}a)RYB6pdf5xfn&^^RRL?T=_1fFr}P7RSHt#ilqEhN~h%Nmmz>cgkwS@AnaikuQo z4lA|QZDe^}T0BRbJY%JDSNdGmPV+>h#-~2wyQ3qCXjGJOmCqlgM6x=_tXCeUwJcBD zLm2k69-^VSiK1ps7o2@6(z#MIuIFi)Lnz1@I28nT?<2D=atdO+h@(3|cLcY!Pj28S^HX+4YGhs6NWmhY zkOCyeNHuENeE8%chW)s~>}lchH|Hlk`wFK!2wkL)E*q{ZFe{SK$W)wT(yP0j+kmS# zIFen_>_^U{0p3VR#j7o5%^YpF#IuSkM+r(8+ws1QN=Zu+*XD0(WuH(bj zUR{}(qOp_!MmhsZa?mWUTfB_kVORZBWU2c0s%Bp13Mn4+NceImAL?Qz^j_2*V;^zVdfg+O((}qib1)<+Y-iHCzYoi^!)UDCsOs22$UU5BwJpVFdnZ5qX>km@eZIU$JaimEP@58?c(8@tlQ#xc9C zXB(5Ag02b08BSMv8p|^%!m9u}o;m$$yA};EWbKkE>fCuGs32sLI@Ni~s}X{BoMejZ zIIJ|tK$B_C=I>Nya__OS2t2k&N_?{bn|9%e&lOz8BVt>T>xvCgKH#}KfKlsAmB1je zQHlZn5O#tyOnk|ajl@xNQbi;<`F?UiEGby#a=$i5IP|FCJEc;mBLMv>dq+19BZ#)H zakNrxEy61DG^dhBML^`Fd<^>2t*zYztQ|07D9Xg=l#qt$-wZyus7L{sf}Ee21JKhF zSAl2AE@z3;kKxV;{IU9b*Bn*Z)h2!U`x0oe+x!-q_7@3rJ?xJuV8D&41`bEBKSN&E zJgU~0rdebwBdUQcdBH^r+fSh?e=+Zl8}>)An)CK`@bvOb__~sK@y8$#xcmZ)=bG#M zK_$hKwS~kfx;?D9Bpl~2&-@GZ_NLw0*-uCwZ>wCr+>%Q1vmK56AdUuoYmJ`H;Mv1@ zDr}ByF@pn$&ImiY^kLh*dM>S|!xg>6fZ=0>*nr>cXYl-STq4|E+}Xz!(uBOm_eWgq zW0HR$a0mETJmsk^9M+|46tjSX8;(9+)r;awXl?XeRi+AKhSFDIAwMwKN2fh&sQWuZ zEW3e=gS3&sHOqWg)FH6&RzpMb4#A$OJePU9@<|gb05mG`*fxzjR}g&DA>bj>6Rbk zKvV||#L0}S0x|sQu)fol9FBvEXp|&}Y8{|qbBYNx!o2qTmNU0vl)&f#$#24;Vhn(? zN#zl^jihF(CDXg@XL1}V3Jx$vG5KewdXUbf1zdz2ADGZB4GjxgxbV%xASO#G4$_B` zpYGSR_(#SHU_MllT*W9M4l*)EKQWQTcqO4}8~2B1Kwt*~w)8z-T?XULMOT2FNe~2e z#!s>NM>U+`%!*R6?0zHg)IKM&e<8L^2K8gu`&M7W`Q*8|xw4&G?KbjmearK2&Q9;l z)^CUWRGNfQ#|)cXCq`_O{(bAQ(yc_d_jl1EL3b>emPJ2%jOD#+k$bzHR_9hmq|h5n zg?oX43x;J;j0_$J;wzjD=DA&@FdV)(&2(2*w-+}H9P&2T>db!j0~&Ak0;+`bHcIQb zyKq4D#bV`ibxg;XTZ+QTrIC^rh@G5`rJD+S`kM1Uh}w_YG^=YE&+nu~nnv6?B>jCW ztnpT>28k?*DhaLBAoE`va&mYcybAIycUhBN)EwlqGX#tf3C3^-`Sq^aXylqjzYf_; z_F1l>xBEgzB7)ff4cNi@9FNYuH%1EcGhd%7+C8P!fX35|o(~

diff --git a/examples/tensorboard_pr_curve.py b/examples/tensorboard_pr_curve.py
new file mode 100644
index 00000000..7f153e18
--- /dev/null
+++ b/examples/tensorboard_pr_curve.py
@@ -0,0 +1,237 @@
+# TRAINS - Example of new tensorboard pr_curves model
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Create sample PR curve summary data. +We have 3 classes: R, G, and B. We generate colors within RGB space from 3 +normal distributions (1 at each corner of the color triangle: [255, 0, 0], +[0, 255, 0], and [0, 0, 255]). +The true label of each random color is associated with the normal distribution +that generated it. +Using 3 other normal distributions (over the distance each color is from a +corner of the color triangle - RGB), we then compute the probability that each +color belongs to the class. We use those probabilities to generate PR curves. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os.path + +from absl import app +from absl import flags +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf +from tensorboard.plugins.pr_curve import summary +from trains import Task + +task = Task.init(project_name='examples', task_name='tensorboard pr_curve') + +tf.compat.v1.disable_v2_behavior() +FLAGS = flags.FLAGS + +flags.DEFINE_string('logdir', '/tmp/pr_curve_demo', 'Directory into which to write TensorBoard data.') + +flags.DEFINE_integer('steps', 10, + 'Number of steps to generate for each PR curve.') + + +def start_runs( + logdir, + steps, + run_name, + thresholds, + mask_every_other_prediction=False): + """Generate a PR curve with precision and recall evenly weighted. + Arguments: + logdir: The directory into which to store all the runs' data. + steps: The number of steps to run for. + run_name: The name of the run. + thresholds: The number of thresholds to use for PR curves. + mask_every_other_prediction: Whether to mask every other prediction by + alternating weights between 0 and 1. + """ + tf.compat.v1.reset_default_graph() + tf.compat.v1.set_random_seed(42) + + # Create a normal distribution layer used to generate true color labels. + distribution = tf.compat.v1.distributions.Normal(loc=0., scale=142.) + + # Sample the distribution to generate colors. Lets generate different numbers + # of each color. The first dimension is the count of examples. + + # The calls to sample() are given fixed random seed values that are "magic" + # in that they correspond to the default seeds for those ops when the PR + # curve test (which depends on this code) was written. We've pinned these + # instead of continuing to use the defaults since the defaults are based on + # node IDs from the sequence of nodes added to the graph, which can silently + # change when this code or any TF op implementations it uses are modified. + + # TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds. + + # Generate reds. 
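+  # Each red example gets an R channel of 255 minus a half-normal sample (so it
+  # leans toward 255) and G/B channels drawn as half-normal samples around 0;
+  # clip_by_value then keeps every component inside the valid [0, 255] range.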
+ number_of_reds = 100 + true_reds = tf.clip_by_value( + tf.concat([ + 255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)), + tf.abs(distribution.sample([number_of_reds, 2], seed=34)) + ], axis=1), + 0, 255) + + # Generate greens. + number_of_greens = 200 + true_greens = tf.clip_by_value( + tf.concat([ + tf.abs(distribution.sample([number_of_greens, 1], seed=61)), + 255 - tf.abs(distribution.sample([number_of_greens, 1], seed=82)), + tf.abs(distribution.sample([number_of_greens, 1], seed=105)) + ], axis=1), + 0, 255) + + # Generate blues. + number_of_blues = 150 + true_blues = tf.clip_by_value( + tf.concat([ + tf.abs(distribution.sample([number_of_blues, 2], seed=132)), + 255 - tf.abs(distribution.sample([number_of_blues, 1], seed=153)) + ], axis=1), + 0, 255) + + # Assign each color a vector of 3 booleans based on its true label. + labels = tf.concat([ + tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)), + tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)), + tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)), + ], axis=0) + + # We introduce 3 normal distributions. They are used to predict whether a + # color falls under a certain class (based on distances from corners of the + # color triangle). The distributions vary per color. We have the distributions + # narrow over time. + initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)] + iteration = tf.compat.v1.placeholder(tf.int32, shape=[]) + red_predictor = tf.compat.v1.distributions.Normal( + loc=0., + scale=tf.cast( + initial_standard_deviations[0] - iteration, + dtype=tf.float32)) + green_predictor = tf.compat.v1.distributions.Normal( + loc=0., + scale=tf.cast( + initial_standard_deviations[1] - iteration, + dtype=tf.float32)) + blue_predictor = tf.compat.v1.distributions.Normal( + loc=0., + scale=tf.cast( + initial_standard_deviations[2] - iteration, + dtype=tf.float32)) + + # Make predictions (assign 3 probabilities to each color based on each color's + # distance to each of the 3 corners). We seek double the area in the right + # tail of the normal distribution. + examples = tf.concat([true_reds, true_greens, true_blues], axis=0) + probabilities_colors_are_red = (1 - red_predictor.cdf( + tf.norm(tensor=examples - tf.constant([255., 0, 0]), axis=1))) * 2 + probabilities_colors_are_green = (1 - green_predictor.cdf( + tf.norm(tensor=examples - tf.constant([0, 255., 0]), axis=1))) * 2 + probabilities_colors_are_blue = (1 - blue_predictor.cdf( + tf.norm(tensor=examples - tf.constant([0, 0, 255.]), axis=1))) * 2 + + predictions = ( + probabilities_colors_are_red, + probabilities_colors_are_green, + probabilities_colors_are_blue + ) + + # This is the crucial piece. We write data required for generating PR curves. + # We create 1 summary per class because we create 1 PR curve per class. + for i, color in enumerate(('red', 'green', 'blue')): + description = ('The probabilities used to create this PR curve are ' + 'generated from a normal distribution. Its standard ' + 'deviation is initially %0.0f and decreases over time.' % + initial_standard_deviations[i]) + + weights = None + if mask_every_other_prediction: + # Assign a weight of 0 to every even-indexed prediction. Odd-indexed + # predictions are assigned a default weight of 1. 
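+      # The index tensor built below numbers the predictions 0..N-1 in the same
+      # shape as predictions[i]; casting (index % 2) to float then yields the
+      # alternating 0/1 weights that zero out every even-indexed prediction.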
+ consecutive_indices = tf.reshape( + tf.range(tf.size(input=predictions[i])), tf.shape(input=predictions[i])) + weights = tf.cast(consecutive_indices % 2, dtype=tf.float32) + + summary.op( + name=color, + labels=labels[:, i], + predictions=predictions[i], + num_thresholds=thresholds, + weights=weights, + display_name='classifying %s' % color, + description=description) + merged_summary_op = tf.compat.v1.summary.merge_all() + events_directory = os.path.join(logdir, run_name) + sess = tf.compat.v1.Session() + writer = tf.compat.v1.summary.FileWriter(events_directory, sess.graph) + + for step in xrange(steps): + feed_dict = { + iteration: step, + } + merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict) + writer.add_summary(merged_summary, step) + + writer.close() + + +def run_all(logdir, steps, thresholds, verbose=False): + """Generate PR curve summaries. + Arguments: + logdir: The directory into which to store all the runs' data. + steps: The number of steps to run for. + verbose: Whether to print the names of runs into stdout during execution. + thresholds: The number of thresholds to use for PR curves. + """ + # First, we generate data for a PR curve that assigns even weights for + # predictions of all classes. + run_name = 'colors' + if verbose: + print('--- Running: %s' % run_name) + start_runs( + logdir=logdir, + steps=steps, + run_name=run_name, + thresholds=thresholds) + + # Next, we generate data for a PR curve that assigns arbitrary weights to + # predictions. + run_name = 'mask_every_other_prediction' + if verbose: + print('--- Running: %s' % run_name) + start_runs( + logdir=logdir, + steps=steps, + run_name=run_name, + thresholds=thresholds, + mask_every_other_prediction=True) + + +def main(_): + print('Saving output to %s.' % FLAGS.logdir) + run_all(FLAGS.logdir, FLAGS.steps, 50, verbose=True) + print('Done. Output saved to %s.' 
% FLAGS.logdir) + + +if __name__ == '__main__': + app.run(main) diff --git a/examples/tensorboard_toy.py b/examples/tensorboard_toy.py new file mode 100644 index 00000000..5f044a43 --- /dev/null +++ b/examples/tensorboard_toy.py @@ -0,0 +1,76 @@ +# TRAINS - Example of tensorboard with tensorflow (without any actual training) +# +import tensorflow as tf +import numpy as np +import cv2 +from time import sleep +#import tensorflow.compat.v1 as tf +#tf.disable_v2_behavior() + +from trains import Task +task = Task.init(project_name='examples', task_name='tensorboard toy example') + + +k = tf.placeholder(tf.float32) + +# Make a normal distribution, with a shifting mean +mean_moving_normal = tf.random_normal(shape=[1000], mean=(5*k), stddev=1) +# Record that distribution into a histogram summary +tf.summary.histogram("normal/moving_mean", mean_moving_normal) +tf.summary.scalar("normal/value", mean_moving_normal[-1]) + +# Make a normal distribution with shrinking variance +variance_shrinking_normal = tf.random_normal(shape=[1000], mean=0, stddev=1-(k)) +# Record that distribution too +tf.summary.histogram("normal/shrinking_variance", variance_shrinking_normal) +tf.summary.scalar("normal/variance_shrinking_normal", variance_shrinking_normal[-1]) + +# Let's combine both of those distributions into one dataset +normal_combined = tf.concat([mean_moving_normal, variance_shrinking_normal], 0) +# We add another histogram summary to record the combined distribution +tf.summary.histogram("normal/bimodal", normal_combined) +tf.summary.scalar("normal/normal_combined", normal_combined[0]) + +# Add a gamma distribution +gamma = tf.random_gamma(shape=[1000], alpha=k) +tf.summary.histogram("gamma", gamma) + +# And a poisson distribution +poisson = tf.random_poisson(shape=[1000], lam=k) +tf.summary.histogram("poisson", poisson) + +# And a uniform distribution +uniform = tf.random_uniform(shape=[1000], maxval=k*10) +tf.summary.histogram("uniform", uniform) + +# Finally, combine everything together! +all_distributions = [mean_moving_normal, variance_shrinking_normal, gamma, poisson, uniform] +all_combined = tf.concat(all_distributions, 0) +tf.summary.histogram("all_combined", all_combined) + +# convert to 4d [batch, col, row, RGB-channels] +image = cv2.imread('./samples/picasso.jpg') +image = image[:, :, 0][np.newaxis, :, :, np.newaxis] +# image = image[np.newaxis, :, :, :] # test greyscale image + +# un-comment to add image reporting +tf.summary.image("test", image, max_outputs=10) + +# Setup a session and summary writer +summaries = tf.summary.merge_all() +sess = tf.Session() + +logger = task.get_logger() + +# Use original FileWriter for comparison , run: +# % tensorboard --logdir=/tmp/histogram_example +writer = tf.summary.FileWriter("/tmp/histogram_example") + +# Setup a loop and write the summaries to disk +N = 40 +for step in range(N): + k_val = step/float(N) + summ = sess.run(summaries, feed_dict={k: k_val}) + writer.add_summary(summ, global_step=step) + +print('Done!') diff --git a/examples/tensorflow_eager.py b/examples/tensorflow_eager.py new file mode 100644 index 00000000..5a9599a5 --- /dev/null +++ b/examples/tensorflow_eager.py @@ -0,0 +1,358 @@ +# TRAINS - Example of tensorflow eager mode, model logging and tensorboard +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A deep MNIST classifier using convolutional layers. +Sample usage: + python mnist.py --help +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import sys +import time +import tensorflow as tf +from tensorflow.examples.tutorials.mnist import input_data +from trains import Task + +tf.enable_eager_execution() + +task = Task.init(project_name='examples', task_name='Tensorflow eager mode') + + +FLAGS = tf.app.flags.FLAGS +tf.app.flags.DEFINE_integer('data_num', 100, """Flag of type integer""") +tf.app.flags.DEFINE_string('img_path', './img', """Flag of type string""") + + +layers = tf.keras.layers +FLAGS = None + + +class Discriminator(tf.keras.Model): + """GAN Discriminator. + A network to differentiate between generated and real handwritten digits. + """ + + def __init__(self, data_format): + """Creates a model for discriminating between real and generated digits. + Args: + data_format: Either 'channels_first' or 'channels_last'. + 'channels_first' is typically faster on GPUs while 'channels_last' is + typically faster on CPUs. See + https://www.tensorflow.org/performance/performance_guide#data_formats + """ + super(Discriminator, self).__init__(name='') + if data_format == 'channels_first': + self._input_shape = [-1, 1, 28, 28] + else: + assert data_format == 'channels_last' + self._input_shape = [-1, 28, 28, 1] + self.conv1 = layers.Conv2D( + 64, 5, padding='SAME', data_format=data_format, activation=tf.tanh) + self.pool1 = layers.AveragePooling2D(2, 2, data_format=data_format) + self.conv2 = layers.Conv2D( + 128, 5, data_format=data_format, activation=tf.tanh) + self.pool2 = layers.AveragePooling2D(2, 2, data_format=data_format) + self.flatten = layers.Flatten() + self.fc1 = layers.Dense(1024, activation=tf.tanh) + self.fc2 = layers.Dense(1, activation=None) + + def call(self, inputs): + """Return two logits per image estimating input authenticity. + Users should invoke __call__ to run the network, which delegates to this + method (and not call this method directly). + Args: + inputs: A batch of images as a Tensor with shape [batch_size, 28, 28, 1] + or [batch_size, 1, 28, 28] + Returns: + A Tensor with shape [batch_size] containing logits estimating + the probability that corresponding digit is real. + """ + x = tf.reshape(inputs, self._input_shape) + x = self.conv1(x) + x = self.pool1(x) + x = self.conv2(x) + x = self.pool2(x) + x = self.flatten(x) + x = self.fc1(x) + x = self.fc2(x) + return x + + +class Generator(tf.keras.Model): + """Generator of handwritten digits similar to the ones in the MNIST dataset. + """ + + def __init__(self, data_format): + """Creates a model for discriminating between real and generated digits. + Args: + data_format: Either 'channels_first' or 'channels_last'. + 'channels_first' is typically faster on GPUs while 'channels_last' is + typically faster on CPUs. 
See + https://www.tensorflow.org/performance/performance_guide#data_formats + """ + super(Generator, self).__init__(name='') + self.data_format = data_format + # We are using 128 6x6 channels as input to the first deconvolution layer + if data_format == 'channels_first': + self._pre_conv_shape = [-1, 128, 6, 6] + else: + assert data_format == 'channels_last' + self._pre_conv_shape = [-1, 6, 6, 128] + self.fc1 = layers.Dense(6 * 6 * 128, activation=tf.tanh) + + # In call(), we reshape the output of fc1 to _pre_conv_shape + + # Deconvolution layer. Resulting image shape: (batch, 14, 14, 64) + self.conv1 = layers.Conv2DTranspose( + 64, 4, strides=2, activation=None, data_format=data_format) + + # Deconvolution layer. Resulting image shape: (batch, 28, 28, 1) + self.conv2 = layers.Conv2DTranspose( + 1, 2, strides=2, activation=tf.nn.sigmoid, data_format=data_format) + + def call(self, inputs): + """Return a batch of generated images. + Users should invoke __call__ to run the network, which delegates to this + method (and not call this method directly). + Args: + inputs: A batch of noise vectors as a Tensor with shape + [batch_size, length of noise vectors]. + Returns: + A Tensor containing generated images. If data_format is 'channels_last', + the shape of returned images is [batch_size, 28, 28, 1], else + [batch_size, 1, 28, 28] + """ + + x = self.fc1(inputs) + x = tf.reshape(x, shape=self._pre_conv_shape) + x = self.conv1(x) + x = self.conv2(x) + return x + + +def discriminator_loss(discriminator_real_outputs, discriminator_gen_outputs): + """Original discriminator loss for GANs, with label smoothing. + See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more + details. + Args: + discriminator_real_outputs: Discriminator output on real data. + discriminator_gen_outputs: Discriminator output on generated data. Expected + to be in the range of (-inf, inf). + Returns: + A scalar loss Tensor. + """ + + loss_on_real = tf.losses.sigmoid_cross_entropy( + tf.ones_like(discriminator_real_outputs), + discriminator_real_outputs, + label_smoothing=0.25) + loss_on_generated = tf.losses.sigmoid_cross_entropy( + tf.zeros_like(discriminator_gen_outputs), discriminator_gen_outputs) + loss = loss_on_real + loss_on_generated + tf.contrib.summary.scalar('discriminator_loss', loss) + return loss + + +def generator_loss(discriminator_gen_outputs): + """Original generator loss for GANs. + L = -log(sigmoid(D(G(z)))) + See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) + for more details. + Args: + discriminator_gen_outputs: Discriminator output on generated data. Expected + to be in the range of (-inf, inf). + Returns: + A scalar loss Tensor. + """ + loss = tf.losses.sigmoid_cross_entropy( + tf.ones_like(discriminator_gen_outputs), discriminator_gen_outputs) + tf.contrib.summary.scalar('generator_loss', loss) + return loss + + +def train_one_epoch(generator, discriminator, generator_optimizer, + discriminator_optimizer, dataset, step_counter, + log_interval, noise_dim): + """Train `generator` and `discriminator` models on `dataset`. + Args: + generator: Generator model. + discriminator: Discriminator model. + generator_optimizer: Optimizer to use for generator. + discriminator_optimizer: Optimizer to use for discriminator. + dataset: Dataset of images to train on. + step_counter: An integer variable, used to write summaries regularly. + log_interval: How many steps to wait between logging and collecting + summaries. + noise_dim: Dimension of noise vector to use. 
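+  Returns:
+    None. The accumulated per-batch losses are only used for the periodic
+    progress print-out; scalar and image summaries are written via
+    tf.contrib.summary inside the batch loop.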
+ """ + + total_generator_loss = 0.0 + total_discriminator_loss = 0.0 + for (batch_index, images) in enumerate(dataset): + with tf.device('/cpu:0'): + tf.assign_add(step_counter, 1) + + with tf.contrib.summary.record_summaries_every_n_global_steps( + log_interval, global_step=step_counter): + current_batch_size = images.shape[0] + noise = tf.random_uniform( + shape=[current_batch_size, noise_dim], + minval=-1., + maxval=1., + seed=batch_index) + + # we can use 2 tapes or a single persistent tape. + # Using two tapes is memory efficient since intermediate tensors can be + # released between the two .gradient() calls below + with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape: + generated_images = generator(noise) + tf.contrib.summary.image( + 'generated_images', + tf.reshape(generated_images, [-1, 28, 28, 1]), + max_images=10) + + discriminator_gen_outputs = discriminator(generated_images) + discriminator_real_outputs = discriminator(images) + discriminator_loss_val = discriminator_loss(discriminator_real_outputs, + discriminator_gen_outputs) + total_discriminator_loss += discriminator_loss_val + + generator_loss_val = generator_loss(discriminator_gen_outputs) + total_generator_loss += generator_loss_val + + generator_grad = gen_tape.gradient(generator_loss_val, + generator.variables) + discriminator_grad = disc_tape.gradient(discriminator_loss_val, + discriminator.variables) + + generator_optimizer.apply_gradients( + zip(generator_grad, generator.variables)) + discriminator_optimizer.apply_gradients( + zip(discriminator_grad, discriminator.variables)) + + if log_interval and batch_index > 0 and batch_index % log_interval == 0: + print('Batch #%d\tAverage Generator Loss: %.6f\t' + 'Average Discriminator Loss: %.6f' % + (batch_index, total_generator_loss / batch_index, + total_discriminator_loss / batch_index)) + + +def main(_): + (device, data_format) = ('/gpu:0', 'channels_first') + if FLAGS.no_gpu or tf.contrib.eager.num_gpus() <= 0: + (device, data_format) = ('/cpu:0', 'channels_last') + print('Using device %s, and data format %s.' % (device, data_format)) + + # Load the datasets + data = input_data.read_data_sets(FLAGS.data_dir) + dataset = ( + tf.data.Dataset.from_tensor_slices(data.train.images[:1280]).shuffle(60000) + .batch(FLAGS.batch_size)) + + # Create the models and optimizers. + model_objects = { + 'generator': Generator(data_format), + 'discriminator': Discriminator(data_format), + 'generator_optimizer': tf.train.AdamOptimizer(FLAGS.lr), + 'discriminator_optimizer': tf.train.AdamOptimizer(FLAGS.lr), + 'step_counter': tf.train.get_or_create_global_step(), + } + + # Prepare summary writer and checkpoint info + summary_writer = tf.contrib.summary.create_file_writer( + FLAGS.output_dir, flush_millis=1000) + checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt') + latest_cpkt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir) + if latest_cpkt: + print('Using latest checkpoint at ' + latest_cpkt) + checkpoint = tf.train.Checkpoint(**model_objects) + # Restore variables on creation if a checkpoint exists. 
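+  # Restoration is deferred ("restore-on-create"): checkpointed values are
+  # matched and assigned as the corresponding model and optimizer variables
+  # are actually created.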
+ checkpoint.restore(latest_cpkt) + + with tf.device(device): + for _ in range(3): + start = time.time() + with summary_writer.as_default(): + train_one_epoch(dataset=dataset, log_interval=FLAGS.log_interval, + noise_dim=FLAGS.noise, **model_objects) + end = time.time() + checkpoint.save(checkpoint_prefix) + print('\nTrain time for epoch #%d (step %d): %f' % + (checkpoint.save_counter.numpy(), + checkpoint.step_counter.numpy(), + end - start)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '--data-dir', + type=str, + default='/tmp/tensorflow/mnist/input_data', + help=('Directory for storing input data (default ' + '/tmp/tensorflow/mnist/input_data)')) + parser.add_argument( + '--batch-size', + type=int, + default=16, + metavar='N', + help='input batch size for training (default: 128)') + parser.add_argument( + '--log-interval', + type=int, + default=1, + metavar='N', + help=('number of batches between logging and writing summaries ' + '(default: 100)')) + parser.add_argument( + '--output_dir', + type=str, + default='/tmp/tensorflow/', + metavar='DIR', + help='Directory to write TensorBoard summaries (defaults to none)') + parser.add_argument( + '--checkpoint_dir', + type=str, + default='/tmp/tensorflow/mnist/checkpoints/', + metavar='DIR', + help=('Directory to save checkpoints in (once per epoch) (default ' + '/tmp/tensorflow/mnist/checkpoints/)')) + parser.add_argument( + '--lr', + type=float, + default=0.001, + metavar='LR', + help='learning rate (default: 0.001)') + parser.add_argument( + '--noise', + type=int, + default=100, + metavar='N', + help='Length of noise vector for generator input (default: 100)') + parser.add_argument( + '--no-gpu', + action='store_true', + default=False, + help='disables GPU usage even if a GPU is available') + + FLAGS, unparsed = parser.parse_known_args() + +tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/examples/tensorflow_mnist.py b/examples/tensorflow_mnist.py new file mode 100644 index 00000000..cdb56362 --- /dev/null +++ b/examples/tensorflow_mnist.py @@ -0,0 +1,171 @@ +# TRAINS - Example of tensorflow mnist training model logging +# +# Save and Restore a model using TensorFlow. 
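+# (A first session trains the model and saves it with tf.train.Saver; a second
+# session then restores the same checkpoint file and resumes training.)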
+# This example is using the MNIST database of handwritten digits +# (http://yann.lecun.com/exdb/mnist/) +# +# Author: Aymeric Damien +# Project: https://github.com/aymericdamien/TensorFlow-Examples/ + +from __future__ import print_function + +from os.path import exists + +import numpy as np +import tensorflow as tf +from trains import Task + +MODEL_PATH = "/tmp/module_no_signatures" +task = Task.init(project_name='examples', task_name='Tensorflow mnist example') + +## block +X_train = np.random.rand(100, 3) +y_train = np.random.rand(100, 1) +model = tf.keras.models.Sequential([tf.keras.layers.Dense(1)]) +model.compile(loss='categorical_crossentropy', + optimizer=tf.keras.optimizers.SGD(), + metrics=['accuracy']) +model.fit(X_train, y_train, steps_per_epoch=1, nb_epoch=1) + +with tf.Session(graph=tf.Graph()) as sess: + if exists(MODEL_PATH): + try: + tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], MODEL_PATH) + m2 = tf.saved_model.load(sess, [tf.saved_model.tag_constants.SERVING], MODEL_PATH) + except Exception: + pass + tf.train.Checkpoint +## block end + +# Import MNIST data +from tensorflow.examples.tutorials.mnist import input_data +mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) + +# Parameters +parameters = { + 'learning_rate': 0.001, + 'batch_size': 100, + 'display_step': 1, + 'model_path': "/tmp/model.ckpt", + + # Network Parameters + 'n_hidden_1': 256, # 1st layer number of features + 'n_hidden_2': 256, # 2nd layer number of features + 'n_input': 784, # MNIST data input (img shape: 28*28) + 'n_classes': 10, # MNIST total classes (0-9 digits) +} +# TRAINS: connect parameters with the experiment/task for logging +parameters = task.connect(parameters) + +# tf Graph input +x = tf.placeholder("float", [None, parameters['n_input']]) +y = tf.placeholder("float", [None, parameters['n_classes']]) + + +# Create model +def multilayer_perceptron(x, weights, biases): + # Hidden layer with RELU activation + layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) + layer_1 = tf.nn.relu(layer_1) + # Hidden layer with RELU activation + layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) + layer_2 = tf.nn.relu(layer_2) + # Output layer with linear activation + out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] + return out_layer + +# Store layers weight & bias +weights = { + 'h1': tf.Variable(tf.random_normal([parameters['n_input'], parameters['n_hidden_1']])), + 'h2': tf.Variable(tf.random_normal([parameters['n_hidden_1'], parameters['n_hidden_2']])), + 'out': tf.Variable(tf.random_normal([parameters['n_hidden_2'], parameters['n_classes']])) +} +biases = { + 'b1': tf.Variable(tf.random_normal([parameters['n_hidden_1']])), + 'b2': tf.Variable(tf.random_normal([parameters['n_hidden_2']])), + 'out': tf.Variable(tf.random_normal([parameters['n_classes']])) +} + +# Construct model +pred = multilayer_perceptron(x, weights, biases) + +# Define loss and optimizer +cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) +optimizer = tf.train.AdamOptimizer(learning_rate=parameters['learning_rate']).minimize(cost) + +# Initialize the variables (i.e. assign their default value) +init = tf.global_variables_initializer() + +# 'Saver' op to save and restore all the variables +saver = tf.train.Saver() + +# Running first session +print("Starting 1st session...") +with tf.Session() as sess: + + # Run the initializer + sess.run(init) + + # Training cycle + for epoch in range(3): + avg_cost = 0. 
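+        # Number of mini-batches needed to cover the full training set once;
+        # avg_cost averages the per-batch loss over all of them for this epoch.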
+ total_batch = int(mnist.train.num_examples/parameters['batch_size']) + # Loop over all batches + for i in range(total_batch): + batch_x, batch_y = mnist.train.next_batch(parameters['batch_size']) + # Run optimization op (backprop) and cost op (to get loss value) + _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, + y: batch_y}) + # Compute average loss + avg_cost += c / total_batch + # Display logs per epoch step + if epoch % parameters['display_step'] == 0: + print("Epoch:", '%04d' % (epoch+1), "cost=", \ + "{:.9f}".format(avg_cost)) + save_path = saver.save(sess, parameters['model_path']) + + print("First Optimization Finished!") + + # Test model + correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) + # Calculate accuracy + accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) + print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})) + + # Save model weights to disk + save_path = saver.save(sess, parameters['model_path']) + print("Model saved in file: %s" % save_path) + +# Running a new session +print("Starting 2nd session...") +with tf.Session() as sess: + # Initialize variables + sess.run(init) + + # Restore model weights from previously saved model + saver.restore(sess, parameters['model_path']) + print("Model restored from file: %s" % save_path) + + # Resume training + for epoch in range(7): + avg_cost = 0. + total_batch = int(mnist.train.num_examples / parameters['batch_size']) + # Loop over all batches + for i in range(total_batch): + batch_x, batch_y = mnist.train.next_batch(parameters['batch_size']) + # Run optimization op (backprop) and cost op (to get loss value) + _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, + y: batch_y}) + # Compute average loss + avg_cost += c / total_batch + # Display logs per epoch step + if epoch % parameters['display_step'] == 0: + print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost)) + print("Second Optimization Finished!") + + # Test model + correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) + # Calculate accuracy + accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) + print("Accuracy:", accuracy.eval( + {x: mnist.test.images, y: mnist.test.labels})) diff --git a/examples/trains.conf b/examples/trains.conf new file mode 100644 index 00000000..aa5d7a6f --- /dev/null +++ b/examples/trains.conf @@ -0,0 +1,131 @@ +# TRAINS SDK configuration file +api { + host: http://localhost:8008 + credentials {"access_key": "EGRTCO8JMSIGI6S39GTP43NFWXDQOW", "secret_key": "x!XTov_G-#vspE*Y(h$Anm&DIc5Ou-F)jsl$PdOyj5wG1&E!Z8"} +} +sdk { + # TRAINS - default SDK configuration + + storage { + cache { + # Defaults to system temp folder / cache + default_base_dir: "~/.trains/cache" + } + } + + metrics { + # History size for debug files per metric/variant. For each metric/variant combination with an attached file + # (e.g. debug image event), file names for the uploaded files will be recycled in such a way that no more than + # X files are stored in the upload destination for each metric/variant combination. 
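+        # For example, with the value below the 101st file uploaded for a given
+        # metric/variant reuses the name of the 1st one, overwriting it at the
+        # upload destination.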
+ file_history_size: 100 + + # Settings for generated debug images + images { + format: JPEG + quality: 87 + subsampling: 0 + } + } + + network { + metrics { + # Number of threads allocated to uploading files (typically debug images) when transmitting metrics for + # a specific iteration + file_upload_threads: 4 + + # Warn about upload starvation if no uploads were made in specified period while file-bearing events keep + # being sent for upload + file_upload_starvation_warning_sec: 120 + } + + iteration { + # Max number of retries when getting frames if the server returned an error (http code 500) + max_retries_on_server_error: 5 + # Backoff factory for consecutive retry attempts. + # SDK will wait for {backoff factor} * (2 ^ ({number of total retries} - 1)) between retries. + retry_backoff_factor_sec: 10 + } + } + aws { + s3 { + # S3 credentials, used for read/write access by various SDK elements + + # default, used for any bucket not specified below + key: "" + secret: "" + region: "" + + credentials: [ + # specifies key/secret credentials to use when handling s3 urls (read or write) + # { + # bucket: "my-bucket-name" + # key: "my-access-key" + # secret: "my-secret-key" + # }, + # { + # # This will apply to all buckets in this host (unless key/value is specifically provided for a given bucket) + # host: "my-minio-host:9000" + # key: "12345678" + # secret: "12345678" + # multipart: false + # secure: false + # } + ] + } + boto3 { + pool_connections: 512 + max_multipart_concurrency: 16 + } + } + google.storage { + # # Default project and credentials file + # # Will be used when no bucket configuration is found + # project: "trains" + # credentials_json: "/path/to/credentials.json" + + # # Specific credentials per bucket and sub directory + # credentials = [ + # { + # bucket: "my-bucket" + # subdir: "path/in/bucket" # Not required + # project: "trains" + # credentials_json: "/path/to/credentials.json" + # }, + # ] + } + + log { + # debugging feature: set this to true to make null log propagate messages to root logger (so they appear in stdout) + null_log_propagate: False + task_log_buffer_capacity: 66 + + # disable urllib info and lower levels + disable_urllib3_info: True + } + + development { + # Development-mode options + + # dev task reuse window + task_reuse_time_window_in_hours: 72.0 + + # Run VCS repository detection asynchronously + vcs_repo_detect_async: False + + # Store uncommitted git/hg source code diff in experiment manifest when training in development mode + # This stores "git diff" or "hg diff" into the experiment's "script.requirements.diff" section + store_uncommitted_code_diff_on_train: True + + # Support stopping an experiment in case it was externally stopped, status was changed or task was reset + support_stopping: True + + # Development mode worker + worker { + # Status report period in seconds + report_period_sec: 2 + + # Log all stdout & stderr + log_stdout: True + } + } +} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..4750dd23 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,31 @@ +apache-libcloud>=2.2.1 +attrs>=18.0 +backports.functools-lru-cache>=1.0.2 ; python_version < '3' +boto3>=1.9 +botocore>=1.12 +colorama>=0.4.1 +coloredlogs>=10.0 +enum34>=0.9 +funcsigs>=1.0 +furl>=2.0.0 +future>=0.16.0 +futures>=3.0.5 ; python_version < '3' +google-cloud-storage>=1.13.2 +humanfriendly>=2.1 +jsonmodels>=2.2 +jsonschema>=2.6.0 +numpy>=1.10 +opencv-python>=3.2.0.8 +pathlib2>=2.3.0 +psutil>=3.4.2 +pyhocon>=0.3.38 +python-dateutil>=2.6.1 
+PyYAML>=3.12 +requests-file>=1.4.2 +requests>=2.18.4 +six>=1.11.0 +tqdm>=4.19.5 +urllib3>=1.22 +watchdog>=0.8.0 +pyjwt>=1.6.4 +plotly>=3.9.0 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..4c093ec1 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,4 @@ +[bdist_wheel] +# Currently supports Python2 only, +# Python 3 is coming... +universal=1 diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..e13f384b --- /dev/null +++ b/setup.py @@ -0,0 +1,77 @@ +""" +TRAINS - Artificial Intelligence Version Control +https://github.com/allegroai/trains +""" + +# Always prefer setuptools over distutils +from setuptools import setup, find_packages +from six import exec_ +from pathlib2 import Path + + +here = Path(__file__).resolve().parent + +# Get the long description from the README file +long_description = (here / 'README.md').read_text() + + +def read_version_string(): + result = {} + exec_((here / 'trains/version.py').read_text(), result) + return result['__version__'] + + +version = read_version_string() + +requirements = (here / 'requirements.txt').read_text().splitlines() + +setup( + name='trains', + version=version, + description='TRAINS - Magic Version Control & Experiment Manager for AI', + long_description=long_description, + long_description_content_type='text/markdown', + # The project's main homepage. + url='https://github.com/allegroai/trains', + author='Allegroai', + author_email='trains@allegro.ai', + license='Apache License 2.0', + classifiers=[ + # How mature is this project? Common values are + # 3 - Alpha + # 4 - Beta + # 5 - Production/Stable + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'Intended Audience :: Science/Research', + 'Operating System :: POSIX :: Linux', + 'Operating System :: MacOS :: MacOS X', + 'Operating System :: Microsoft', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Software Development', + 'Topic :: Software Development :: Version Control', + 'Topic :: System :: Logging', + 'Topic :: System :: Monitoring', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'License :: OSI Approved :: Apache Software License', + ], + keywords='trains development machine deep learning version control machine-learning machinelearning ' + 'deeplearning deep-learning experiment-manager experimentmanager', + packages=find_packages(exclude=['contrib', 'docs', 'data', 'examples', 'tests']), + install_requires=requirements, + package_data={ + 'trains': ['config/default/*.conf', 'backend_api/config/default/*.conf'] + }, + include_package_data=True, + # To provide executable scripts, use entry points in preference to the + # "scripts" keyword. Entry points provide cross-platform support and allow + # pip to create the appropriate form of executable for the target platform. 
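+    # As declared below, installing the package also provides a `trains-init`
+    # console command that dispatches to trains.config.default.__main__:main.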
+ entry_points={ + 'console_scripts': [ + 'trains-init = trains.config.default.__main__:main', + ], + }, +) diff --git a/trains/__init__.py b/trains/__init__.py new file mode 100644 index 00000000..2b549bd0 --- /dev/null +++ b/trains/__init__.py @@ -0,0 +1,7 @@ +""" TRAINS open SDK """ + +from .version import __version__ +from .task import Task +from .model import InputModel, OutputModel +from .logger import Logger +from .errors import UsageError diff --git a/trains/backend_api/__init__.py b/trains/backend_api/__init__.py new file mode 100644 index 00000000..c610f64f --- /dev/null +++ b/trains/backend_api/__init__.py @@ -0,0 +1,3 @@ +from .version import __version__ +from .session import Session, CallResult, TimeoutExpiredError, ResultNotReadyError +from .config import load as load_config diff --git a/trains/backend_api/config/__init__.py b/trains/backend_api/config/__init__.py new file mode 100644 index 00000000..a094fc30 --- /dev/null +++ b/trains/backend_api/config/__init__.py @@ -0,0 +1,16 @@ +from ...backend_config import Config +from pathlib2 import Path + + +def load(*additional_module_paths): + # type: (str) -> Config + """ + Load configuration with the API defaults, using the additional module path provided + :param additional_module_paths: Additional config paths for modules who'se default + configurations should be loaded as well + :return: Config object + """ + config = Config(verbose=False) + this_module_path = Path(__file__).parent + config.load_relative_to(this_module_path, *additional_module_paths) + return config diff --git a/trains/backend_api/config/default/api.conf b/trains/backend_api/config/default/api.conf new file mode 100644 index 00000000..a084bcbb --- /dev/null +++ b/trains/backend_api/config/default/api.conf @@ -0,0 +1,41 @@ +{ + version: 1.5 + host: https://demoapi.trainsai.io + + # default version assigned to requests with no specific version. this is not expected to change + # as it keeps us backwards compatible. + default_version: 1.5 + + http { + max_req_size = 15728640 # request size limit (smaller than that configured in api server) + + retries { + # retry values (int, 0 means fail on first retry) + total: 240 # Total number of retries to allow. Takes precedence over other counts. 
+ connect: 240 # How many times to retry on connection-related errors (never reached server) + read: 240 # How many times to retry on read errors (waiting for server) + redirect: 240 # How many redirects to perform (HTTP response with a status code 301, 302, 303, 307 or 308) + status: 240 # How many times to retry on bad status codes + + # backoff parameters + # timeout between retries is min({backoff_max}, {backoff factor} * (2 ^ ({number of total retries} - 1)) + backoff_factor: 1.0 + backoff_max: 300.0 + } + + wait_on_maintenance_forever: true + + pool_maxsize: 512 + pool_connections: 512 + } + + credentials { + access_key: "" + secret_key: "" + } + + auth { + # When creating a request, if token will expire in less than this value, try to refresh the token + token_expiration_threshold_sec = 360 + } +} diff --git a/trains/backend_api/config/default/logging.conf b/trains/backend_api/config/default/logging.conf new file mode 100644 index 00000000..d21a0e87 --- /dev/null +++ b/trains/backend_api/config/default/logging.conf @@ -0,0 +1,9 @@ +{ + version: 1 + loggers { + urllib3 { + level: ERROR + } + } + +} diff --git a/trains/backend_api/schema/__init__.py b/trains/backend_api/schema/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/trains/backend_api/schema/action.py b/trains/backend_api/schema/action.py new file mode 100644 index 00000000..f2fe2bdb --- /dev/null +++ b/trains/backend_api/schema/action.py @@ -0,0 +1,38 @@ +import re +from functools import partial + +import attr +from attr.converters import optional as optional_converter +from attr.validators import instance_of, optional, and_ +from six import string_types + +# noinspection PyTypeChecker +sequence = instance_of((list, tuple)) + + +def sequence_of(types): + def validator(_, attrib, value): + assert all(isinstance(x, types) for x in value), attrib.name + + return and_(sequence, validator) + + +@attr.s +class Action(object): + name = attr.ib() + version = attr.ib() + service = attr.ib() + definitions_keys = attr.ib(validator=sequence) + authorize = attr.ib(validator=instance_of(bool), default=True) + log_data = attr.ib(validator=instance_of(bool), default=True) + log_result_data = attr.ib(validator=instance_of(bool), default=True) + internal = attr.ib(default=False) + allow_roles = attr.ib(default=None, validator=optional(sequence_of(string_types))) + request = attr.ib(validator=optional(instance_of(dict)), default=None) + batch_request = attr.ib(validator=optional(instance_of(dict)), default=None) + response = attr.ib(validator=optional(instance_of(dict)), default=None) + method = attr.ib(default=None) + description = attr.ib( + default=None, + validator=optional(instance_of(string_types)), + ) diff --git a/trains/backend_api/schema/service.py b/trains/backend_api/schema/service.py new file mode 100644 index 00000000..14592207 --- /dev/null +++ b/trains/backend_api/schema/service.py @@ -0,0 +1,201 @@ +import itertools +import re + +import attr +import six + +import pyhocon + +from .action import Action + + +class Service(object): + """ Service schema handler """ + + __jsonschema_ref_ex = re.compile("^#/definitions/(.*)$") + + @property + def default(self): + return self._default + + @property + def actions(self): + return self._actions + + @property + def definitions(self): + """ Raw service definitions (each might be dependant on some of its siblings) """ + return self._definitions + + @property + def definitions_refs(self): + return self._definitions_refs + + @property + def name(self): + return 
self._name + + @property + def doc(self): + return self._doc + + def __init__(self, name, service_config): + self._name = name + self._default = None + self._actions = [] + self._definitions = None + self._definitions_refs = None + self._doc = None + self.parse(service_config) + + @classmethod + def get_ref_name(cls, ref_string): + m = cls.__jsonschema_ref_ex.match(ref_string) + if m: + return m.group(1) + + def parse(self, service_config): + self._default = service_config.get( + "_default", pyhocon.ConfigTree() + ).as_plain_ordered_dict() + + self._doc = '{} service'.format(self.name) + description = service_config.get('_description', '') + if description: + self._doc += '\n\n{}'.format(description) + self._definitions = service_config.get( + "_definitions", pyhocon.ConfigTree() + ).as_plain_ordered_dict() + self._definitions_refs = { + k: self._get_schema_references(v) for k, v in self._definitions.items() + } + all_refs = set(itertools.chain(*self.definitions_refs.values())) + if not all_refs.issubset(self.definitions): + raise ValueError( + "Unresolved references (%s) in %s/definitions" + % (", ".join(all_refs.difference(self.definitions)), self.name) + ) + + actions = { + k: v.as_plain_ordered_dict() + for k, v in service_config.items() + if not k.startswith("_") + } + self._actions = { + action_name: action + for action_name, action in ( + (action_name, self._parse_action_versions(action_name, action_versions)) + for action_name, action_versions in actions.items() + ) + if action + } + + def _parse_action_versions(self, action_name, action_versions): + def parse_version(action_version): + try: + return float(action_version) + except (ValueError, TypeError) as ex: + raise ValueError( + "Failed parsing version number {} ({}) in {}/{}".format( + action_version, ex.args[0], self.name, action_name + ) + ) + + def add_internal(cfg): + if "internal" in action_versions: + cfg.setdefault("internal", action_versions["internal"]) + return cfg + + return { + parsed_version: action + for parsed_version, action in ( + (parsed_version, self._parse_action(action_name, parsed_version, add_internal(cfg))) + for parsed_version, cfg in ( + (parse_version(version), cfg) + for version, cfg in action_versions.items() + if version not in ["internal", "allow_roles", "authorize"] + ) + ) + if action + } + + def _get_schema_references(self, s): + refs = set() + if isinstance(s, dict): + for k, v in s.items(): + if isinstance(v, six.string_types): + m = self.__jsonschema_ref_ex.match(v) + if m: + refs.add(m.group(1)) + continue + elif k in ("oneOf", "anyOf") and isinstance(v, list): + refs.update(*map(self._get_schema_references, v)) + refs.update(self._get_schema_references(v)) + return refs + + def _expand_schema_references_with_definitions(self, schema, refs=None): + definitions = schema.get("definitions", {}) + refs = refs if refs is not None else self._get_schema_references(schema) + required_refs = set(refs).difference(definitions) + if not required_refs: + return required_refs + if not required_refs.issubset(self.definitions): + raise ValueError( + "Unresolved references (%s)" + % ", ".join(required_refs.difference(self.definitions)) + ) + + # update required refs with all sub requirements + last_required_refs = None + while last_required_refs != required_refs: + last_required_refs = required_refs.copy() + additional_refs = set( + itertools.chain( + *(self.definitions_refs.get(ref, []) for ref in required_refs) + ) + ) + required_refs.update(additional_refs) + return required_refs + + def 
_resolve_schema_references(self, schema, refs=None): + definitions = schema.get("definitions", {}) + definitions.update({k: v for k, v in self.definitions.items() if k in refs}) + schema["definitions"] = definitions + + def _parse_action(self, action_name, action_version, action_config): + data = self.default.copy() + data.update(action_config) + + if not action_config.get("generate", True): + return None + + definitions_keys = set() + for schema_key in ("request", "response"): + if schema_key in action_config: + try: + schema = action_config[schema_key] + refs = self._expand_schema_references_with_definitions(schema) + self._resolve_schema_references(schema, refs=refs) + definitions_keys.update(refs) + except ValueError as ex: + name = "%s.%s/%.1f/%s" % ( + self.name, + action_name, + action_version, + schema_key, + ) + raise ValueError("%s in %s" % (str(ex), name)) + + return Action( + name=action_name, + version=action_version, + definitions_keys=list(definitions_keys), + service=self.name, + **( + { + key: value + for key, value in data.items() + if key in attr.fields_dict(Action) + } + ) + ) diff --git a/trains/backend_api/services/__init__.py b/trains/backend_api/services/__init__.py new file mode 100644 index 00000000..6d86ff43 --- /dev/null +++ b/trains/backend_api/services/__init__.py @@ -0,0 +1,22 @@ +from .v2_1 import async_request +from .v2_1 import auth +from .v2_1 import debug +from .v2_1 import events +from .v2_1 import models +from .v2_1 import news +from .v2_1 import projects +from .v2_1 import storage +from .v2_1 import tasks + + +__all__ = [ + 'async_request', + 'auth', + 'debug', + 'events', + 'models', + 'news', + 'projects', + 'storage', + 'tasks', +] diff --git a/trains/backend_api/services/v2_1/__init__.py b/trains/backend_api/services/v2_1/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/trains/backend_api/services/v2_1/async_request.py b/trains/backend_api/services/v2_1/async_request.py new file mode 100644 index 00000000..8f466670 --- /dev/null +++ b/trains/backend_api/services/v2_1/async_request.py @@ -0,0 +1,414 @@ +""" +async service + +This service provides support for asynchronous API calls. +""" +import six +import types +from datetime import datetime +import enum + +from dateutil.parser import parse as parse_datetime + +from ....backend_api.session import Request, BatchRequest, Response, DataModel, NonStrictDataModel, CompoundRequest, schema_property, StringEnum + + +class Call(NonStrictDataModel): + """ + :param id: The job ID associated with this call. + :type id: str + :param status: The job's status. + :type status: str + :param created: Job creation time. + :type created: str + :param ended: Job end time. + :type ended: str + :param enqueued: Job enqueue time. + :type enqueued: str + :param meta: Metadata for this job, includes endpoint and additional relevant + call data. + :type meta: dict + :param company: The Company this job belongs to. + :type company: str + :param exec_info: Job execution information. 
+ :type exec_info: str + """ + _schema = { + 'properties': { + 'company': { + 'description': 'The Company this job belongs to.', + 'type': ['string', 'null'], + }, + 'created': { + 'description': 'Job creation time.', + 'type': ['string', 'null'], + }, + 'ended': {'description': 'Job end time.', 'type': ['string', 'null']}, + 'enqueued': { + 'description': 'Job enqueue time.', + 'type': ['string', 'null'], + }, + 'exec_info': { + 'description': 'Job execution information.', + 'type': ['string', 'null'], + }, + 'id': { + 'description': 'The job ID associated with this call.', + 'type': ['string', 'null'], + }, + 'meta': { + 'additionalProperties': True, + 'description': 'Metadata for this job, includes endpoint and additional relevant call data.', + 'type': ['object', 'null'], + }, + 'status': { + 'description': "The job's status.", + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, id=None, status=None, created=None, ended=None, enqueued=None, meta=None, company=None, exec_info=None, **kwargs): + super(Call, self).__init__(**kwargs) + self.id = id + self.status = status + self.created = created + self.ended = ended + self.enqueued = enqueued + self.meta = meta + self.company = company + self.exec_info = exec_info + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + @schema_property('status') + def status(self): + return self._property_status + + @status.setter + def status(self, value): + if value is None: + self._property_status = None + return + + self.assert_isinstance(value, "status", six.string_types) + self._property_status = value + + @schema_property('created') + def created(self): + return self._property_created + + @created.setter + def created(self, value): + if value is None: + self._property_created = None + return + + self.assert_isinstance(value, "created", six.string_types) + self._property_created = value + + @schema_property('ended') + def ended(self): + return self._property_ended + + @ended.setter + def ended(self, value): + if value is None: + self._property_ended = None + return + + self.assert_isinstance(value, "ended", six.string_types) + self._property_ended = value + + @schema_property('enqueued') + def enqueued(self): + return self._property_enqueued + + @enqueued.setter + def enqueued(self, value): + if value is None: + self._property_enqueued = None + return + + self.assert_isinstance(value, "enqueued", six.string_types) + self._property_enqueued = value + + @schema_property('meta') + def meta(self): + return self._property_meta + + @meta.setter + def meta(self, value): + if value is None: + self._property_meta = None + return + + self.assert_isinstance(value, "meta", (dict,)) + self._property_meta = value + + @schema_property('company') + def company(self): + return self._property_company + + @company.setter + def company(self, value): + if value is None: + self._property_company = None + return + + self.assert_isinstance(value, "company", six.string_types) + self._property_company = value + + @schema_property('exec_info') + def exec_info(self): + return self._property_exec_info + + @exec_info.setter + def exec_info(self, value): + if value is None: + self._property_exec_info = None + return + + self.assert_isinstance(value, "exec_info", six.string_types) + self._property_exec_info = value + + +class CallsRequest(Request): + 
""" + Get a list of all asynchronous API calls handled by the system. + This includes both previously handled calls, calls being executed and calls waiting in queue. + + :param status: Return only calls who's status is in this list. + :type status: Sequence[str] + :param endpoint: Return only calls handling this endpoint. Supports wildcards. + :type endpoint: str + :param task: Return only calls associated with this task ID. Supports + wildcards. + :type task: str + """ + + _service = "async" + _action = "calls" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'endpoint': { + 'description': 'Return only calls handling this endpoint. Supports wildcards.', + 'type': ['string', 'null'], + }, + 'status': { + 'description': "Return only calls who's status is in this list.", + 'items': {'enum': ['queued', 'in_progress', 'completed'], 'type': 'string'}, + 'type': ['array', 'null'], + }, + 'task': { + 'description': 'Return only calls associated with this task ID. Supports wildcards.', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, status=None, endpoint=None, task=None, **kwargs): + super(CallsRequest, self).__init__(**kwargs) + self.status = status + self.endpoint = endpoint + self.task = task + + @schema_property('status') + def status(self): + return self._property_status + + @status.setter + def status(self, value): + if value is None: + self._property_status = None + return + + self.assert_isinstance(value, "status", (list, tuple)) + + self.assert_isinstance(value, "status", six.string_types, is_array=True) + self._property_status = value + + @schema_property('endpoint') + def endpoint(self): + return self._property_endpoint + + @endpoint.setter + def endpoint(self, value): + if value is None: + self._property_endpoint = None + return + + self.assert_isinstance(value, "endpoint", six.string_types) + self._property_endpoint = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + +class CallsResponse(Response): + """ + Response of async.calls endpoint. + + :param calls: A list of the current asynchronous calls handled by the system. 
+ :type calls: Sequence[Call] + """ + _service = "async" + _action = "calls" + _version = "1.5" + + _schema = { + 'definitions': { + 'call': { + 'properties': { + 'company': { + 'description': 'The Company this job belongs to.', + 'type': ['string', 'null'], + }, + 'created': { + 'description': 'Job creation time.', + 'type': ['string', 'null'], + }, + 'ended': { + 'description': 'Job end time.', + 'type': ['string', 'null'], + }, + 'enqueued': { + 'description': 'Job enqueue time.', + 'type': ['string', 'null'], + }, + 'exec_info': { + 'description': 'Job execution information.', + 'type': ['string', 'null'], + }, + 'id': { + 'description': 'The job ID associated with this call.', + 'type': ['string', 'null'], + }, + 'meta': { + 'additionalProperties': True, + 'description': 'Metadata for this job, includes endpoint and additional relevant call data.', + 'type': ['object', 'null'], + }, + 'status': { + 'description': "The job's status.", + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'calls': { + 'description': 'A list of the current asynchronous calls handled by the system.', + 'items': {'$ref': '#/definitions/call'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, calls=None, **kwargs): + super(CallsResponse, self).__init__(**kwargs) + self.calls = calls + + @schema_property('calls') + def calls(self): + return self._property_calls + + @calls.setter + def calls(self, value): + if value is None: + self._property_calls = None + return + + self.assert_isinstance(value, "calls", (list, tuple)) + if any(isinstance(v, dict) for v in value): + value = [Call.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "calls", Call, is_array=True) + self._property_calls = value + + +class ResultRequest(Request): + """ + Try getting the result of a previously accepted asynchronous API call. + If execution for the asynchronous call has completed, the complete call response data will be returned. + Otherwise, a 202 code will be returned with no data + + :param id: The id returned by the accepted asynchronous API call. + :type id: str + """ + + _service = "async" + _action = "result" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'id': { + 'description': 'The id returned by the accepted asynchronous API call.', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, id=None, **kwargs): + super(ResultRequest, self).__init__(**kwargs) + self.id = id + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + +class ResultResponse(Response): + """ + Response of async.result endpoint. + + """ + _service = "async" + _action = "result" + _version = "1.5" + + _schema = {'additionalProperties': True, 'definitions': {}, 'type': 'object'} + + +response_mapping = { + ResultRequest: ResultResponse, + CallsRequest: CallsResponse, +} diff --git a/trains/backend_api/services/v2_1/auth.py b/trains/backend_api/services/v2_1/auth.py new file mode 100644 index 00000000..7b7aa63c --- /dev/null +++ b/trains/backend_api/services/v2_1/auth.py @@ -0,0 +1,1112 @@ +""" +auth service + +This service provides authentication management and authorization +validation for the entire system. 
+""" +import six +import types +from datetime import datetime +import enum + +from dateutil.parser import parse as parse_datetime + +from ....backend_api.session import Request, BatchRequest, Response, DataModel, NonStrictDataModel, CompoundRequest, schema_property, StringEnum + + +class Credentials(NonStrictDataModel): + """ + :param access_key: Credentials access key + :type access_key: str + :param secret_key: Credentials secret key + :type secret_key: str + """ + _schema = { + 'properties': { + 'access_key': { + 'description': 'Credentials access key', + 'type': ['string', 'null'], + }, + 'secret_key': { + 'description': 'Credentials secret key', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, access_key=None, secret_key=None, **kwargs): + super(Credentials, self).__init__(**kwargs) + self.access_key = access_key + self.secret_key = secret_key + + @schema_property('access_key') + def access_key(self): + return self._property_access_key + + @access_key.setter + def access_key(self, value): + if value is None: + self._property_access_key = None + return + + self.assert_isinstance(value, "access_key", six.string_types) + self._property_access_key = value + + @schema_property('secret_key') + def secret_key(self): + return self._property_secret_key + + @secret_key.setter + def secret_key(self, value): + if value is None: + self._property_secret_key = None + return + + self.assert_isinstance(value, "secret_key", six.string_types) + self._property_secret_key = value + + +class CredentialKey(NonStrictDataModel): + """ + :param access_key: + :type access_key: str + """ + _schema = {'properties': {'access_key': {'description': '', 'type': ['string', 'null']}}, 'type': 'object'} + def __init__( + self, access_key=None, **kwargs): + super(CredentialKey, self).__init__(**kwargs) + self.access_key = access_key + + @schema_property('access_key') + def access_key(self): + return self._property_access_key + + @access_key.setter + def access_key(self, value): + if value is None: + self._property_access_key = None + return + + self.assert_isinstance(value, "access_key", six.string_types) + self._property_access_key = value + + +class AddUserRequest(Request): + """ + Add a new user manually. Only supported in on-premises deployments + + :param secret_key: A secret key (used as the user's password) + :type secret_key: str + :param name: User name (makes the auth entry more readable) + :type name: str + :param company: Associated company ID. If not provided, the caller's company ID + will be used + :type company: str + :param email: Email address uniquely identifying the user + :type email: str + :param provider: Provider ID indicating the external provider used to + authenticate the user + :type provider: str + :param provider_user_id: Unique user ID assigned by the external provider + :type provider_user_id: str + :param provider_token: Provider-issued token for this user + :type provider_token: str + :param given_name: Given name + :type given_name: str + :param family_name: Family name + :type family_name: str + :param avatar: Avatar URL + :type avatar: str + """ + + _service = "auth" + _action = "add_user" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'avatar': {'description': 'Avatar URL', 'type': 'string'}, + 'company': { + 'description': "Associated company ID. 
If not provided, the caller's company ID will be used", + 'type': 'string', + }, + 'email': { + 'description': 'Email address uniquely identifying the user', + 'type': 'string', + }, + 'family_name': {'description': 'Family name', 'type': 'string'}, + 'given_name': {'description': 'Given name', 'type': 'string'}, + 'name': { + 'description': 'User name (makes the auth entry more readable)', + 'type': 'string', + }, + 'provider': { + 'description': 'Provider ID indicating the external provider used to authenticate the user', + 'type': 'string', + }, + 'provider_token': { + 'description': 'Provider-issued token for this user', + 'type': 'string', + }, + 'provider_user_id': { + 'description': 'Unique user ID assigned by the external provider', + 'type': 'string', + }, + 'secret_key': { + 'description': "A secret key (used as the user's password)", + 'type': ['string', 'null'], + }, + }, + 'required': ['name', 'email'], + 'type': 'object', + } + def __init__( + self, name, email, secret_key=None, company=None, provider=None, provider_user_id=None, provider_token=None, given_name=None, family_name=None, avatar=None, **kwargs): + super(AddUserRequest, self).__init__(**kwargs) + self.secret_key = secret_key + self.name = name + self.company = company + self.email = email + self.provider = provider + self.provider_user_id = provider_user_id + self.provider_token = provider_token + self.given_name = given_name + self.family_name = family_name + self.avatar = avatar + + @schema_property('secret_key') + def secret_key(self): + return self._property_secret_key + + @secret_key.setter + def secret_key(self, value): + if value is None: + self._property_secret_key = None + return + + self.assert_isinstance(value, "secret_key", six.string_types) + self._property_secret_key = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('company') + def company(self): + return self._property_company + + @company.setter + def company(self, value): + if value is None: + self._property_company = None + return + + self.assert_isinstance(value, "company", six.string_types) + self._property_company = value + + @schema_property('email') + def email(self): + return self._property_email + + @email.setter + def email(self, value): + if value is None: + self._property_email = None + return + + self.assert_isinstance(value, "email", six.string_types) + self._property_email = value + + @schema_property('provider') + def provider(self): + return self._property_provider + + @provider.setter + def provider(self, value): + if value is None: + self._property_provider = None + return + + self.assert_isinstance(value, "provider", six.string_types) + self._property_provider = value + + @schema_property('provider_user_id') + def provider_user_id(self): + return self._property_provider_user_id + + @provider_user_id.setter + def provider_user_id(self, value): + if value is None: + self._property_provider_user_id = None + return + + self.assert_isinstance(value, "provider_user_id", six.string_types) + self._property_provider_user_id = value + + @schema_property('provider_token') + def provider_token(self): + return self._property_provider_token + + @provider_token.setter + def provider_token(self, value): + if value is None: + self._property_provider_token = None + return + + self.assert_isinstance(value, 
"provider_token", six.string_types) + self._property_provider_token = value + + @schema_property('given_name') + def given_name(self): + return self._property_given_name + + @given_name.setter + def given_name(self, value): + if value is None: + self._property_given_name = None + return + + self.assert_isinstance(value, "given_name", six.string_types) + self._property_given_name = value + + @schema_property('family_name') + def family_name(self): + return self._property_family_name + + @family_name.setter + def family_name(self, value): + if value is None: + self._property_family_name = None + return + + self.assert_isinstance(value, "family_name", six.string_types) + self._property_family_name = value + + @schema_property('avatar') + def avatar(self): + return self._property_avatar + + @avatar.setter + def avatar(self, value): + if value is None: + self._property_avatar = None + return + + self.assert_isinstance(value, "avatar", six.string_types) + self._property_avatar = value + + +class AddUserResponse(Response): + """ + Response of auth.add_user endpoint. + + :param id: New user ID + :type id: str + :param secret: The secret key used as the user's password + :type secret: str + """ + _service = "auth" + _action = "add_user" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'id': {'description': 'New user ID', 'type': ['string', 'null']}, + 'secret': { + 'description': "The secret key used as the user's password", + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, id=None, secret=None, **kwargs): + super(AddUserResponse, self).__init__(**kwargs) + self.id = id + self.secret = secret + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + @schema_property('secret') + def secret(self): + return self._property_secret + + @secret.setter + def secret(self, value): + if value is None: + self._property_secret = None + return + + self.assert_isinstance(value, "secret", six.string_types) + self._property_secret = value + + +class CreateCredentialsRequest(Request): + """ + Creates a new set of credentials for the authenticated user. + New key/secret is returned. + Note: Secret will never be returned in any other API call. + If a secret is lost or compromised, the key should be revoked + and a new set of credentials can be created. + + """ + + _service = "auth" + _action = "create_credentials" + _version = "1.5" + _schema = { + 'additionalProperties': False, + 'definitions': {}, + 'properties': {}, + 'type': 'object', + } + + +class CreateCredentialsResponse(Response): + """ + Response of auth.create_credentials endpoint. 
+ + :param credentials: Created credentials + :type credentials: Credentials + """ + _service = "auth" + _action = "create_credentials" + _version = "1.5" + + _schema = { + 'definitions': { + 'credentials': { + 'properties': { + 'access_key': { + 'description': 'Credentials access key', + 'type': ['string', 'null'], + }, + 'secret_key': { + 'description': 'Credentials secret key', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'credentials': { + 'description': 'Created credentials', + 'oneOf': [{'$ref': '#/definitions/credentials'}, {'type': 'null'}], + }, + }, + 'type': 'object', + } + def __init__( + self, credentials=None, **kwargs): + super(CreateCredentialsResponse, self).__init__(**kwargs) + self.credentials = credentials + + @schema_property('credentials') + def credentials(self): + return self._property_credentials + + @credentials.setter + def credentials(self, value): + if value is None: + self._property_credentials = None + return + if isinstance(value, dict): + value = Credentials.from_dict(value) + else: + self.assert_isinstance(value, "credentials", Credentials) + self._property_credentials = value + + +class DeleteUserRequest(Request): + """ + Delete a new user manually. Only supported in on-premises deployments. This only removes the user's auth entry so that any references to the deleted user's ID will still have valid user information + + :param user: User ID + :type user: str + """ + + _service = "auth" + _action = "delete_user" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': {'user': {'description': 'User ID', 'type': 'string'}}, + 'required': ['user'], + 'type': 'object', + } + def __init__( + self, user, **kwargs): + super(DeleteUserRequest, self).__init__(**kwargs) + self.user = user + + @schema_property('user') + def user(self): + return self._property_user + + @user.setter + def user(self, value): + if value is None: + self._property_user = None + return + + self.assert_isinstance(value, "user", six.string_types) + self._property_user = value + + +class DeleteUserResponse(Response): + """ + Response of auth.delete_user endpoint. 
+ + :param deleted: True if user was successfully deleted, False otherwise + :type deleted: bool + """ + _service = "auth" + _action = "delete_user" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'deleted': { + 'description': 'True if user was successfully deleted, False otherwise', + 'type': ['boolean', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, deleted=None, **kwargs): + super(DeleteUserResponse, self).__init__(**kwargs) + self.deleted = deleted + + @schema_property('deleted') + def deleted(self): + return self._property_deleted + + @deleted.setter + def deleted(self, value): + if value is None: + self._property_deleted = None + return + + self.assert_isinstance(value, "deleted", (bool,)) + self._property_deleted = value + + +class EditUserRequest(Request): + """ + Edit a users' auth data properties + + :param user: User ID + :type user: str + :param role: The new user's role within the company + :type role: str + """ + + _service = "auth" + _action = "edit_user" + _version = "1.9" + _schema = { + 'definitions': {}, + 'properties': { + 'role': { + 'description': "The new user's role within the company", + 'enum': ['admin', 'superuser', 'user', 'annotator'], + 'type': ['string', 'null'], + }, + 'user': {'description': 'User ID', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, user=None, role=None, **kwargs): + super(EditUserRequest, self).__init__(**kwargs) + self.user = user + self.role = role + + @schema_property('user') + def user(self): + return self._property_user + + @user.setter + def user(self, value): + if value is None: + self._property_user = None + return + + self.assert_isinstance(value, "user", six.string_types) + self._property_user = value + + @schema_property('role') + def role(self): + return self._property_role + + @role.setter + def role(self, value): + if value is None: + self._property_role = None + return + + self.assert_isinstance(value, "role", six.string_types) + self._property_role = value + + +class EditUserResponse(Response): + """ + Response of auth.edit_user endpoint. + + :param updated: Number of users updated (0 or 1) + :type updated: float + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "auth" + _action = "edit_user" + _version = "1.9" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of users updated (0 or 1)', + 'enum': [0, 1], + 'type': ['number', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, fields=None, **kwargs): + super(EditUserResponse, self).__init__(**kwargs) + self.updated = updated + self.fields = fields + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + + self.assert_isinstance(value, "updated", six.integer_types + (float,)) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class GetCredentialsRequest(Request): + """ + Returns all existing credential keys for the authenticated user. 
+ Note: Only credential keys are returned. + + """ + + _service = "auth" + _action = "get_credentials" + _version = "1.5" + _schema = { + 'additionalProperties': False, + 'definitions': {}, + 'properties': {}, + 'type': 'object', + } + + +class GetCredentialsResponse(Response): + """ + Response of auth.get_credentials endpoint. + + :param credentials: List of credentials, each with an empty secret field. + :type credentials: Sequence[CredentialKey] + """ + _service = "auth" + _action = "get_credentials" + _version = "1.5" + + _schema = { + 'definitions': { + 'credential_key': { + 'properties': { + 'access_key': {'description': '', 'type': ['string', 'null']}, + }, + 'type': 'object', + }, + }, + 'properties': { + 'credentials': { + 'description': 'List of credentials, each with an empty secret field.', + 'items': {'$ref': '#/definitions/credential_key'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, credentials=None, **kwargs): + super(GetCredentialsResponse, self).__init__(**kwargs) + self.credentials = credentials + + @schema_property('credentials') + def credentials(self): + return self._property_credentials + + @credentials.setter + def credentials(self, value): + if value is None: + self._property_credentials = None + return + + self.assert_isinstance(value, "credentials", (list, tuple)) + if any(isinstance(v, dict) for v in value): + value = [CredentialKey.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "credentials", CredentialKey, is_array=True) + self._property_credentials = value + + +class GetTaskTokenRequest(Request): + """ + Get a task-limited token based on supplied credentials (token or key/secret). + Intended for use by users who wish to run a task under limited credentials. + Returned token will be limited so that all operations can only be performed on the + specified task. + + :param task: Task ID + :type task: str + :param expiration_sec: Requested token expiration time in seconds. Not + guaranteed, might be overridden by the service + :type expiration_sec: int + """ + + _service = "auth" + _action = "get_task_token" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'expiration_sec': { + 'description': 'Requested token expiration time in seconds.\n Not guaranteed, might be overridden by the service', + 'type': 'integer', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, expiration_sec=None, **kwargs): + super(GetTaskTokenRequest, self).__init__(**kwargs) + self.task = task + self.expiration_sec = expiration_sec + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('expiration_sec') + def expiration_sec(self): + return self._property_expiration_sec + + @expiration_sec.setter + def expiration_sec(self, value): + if value is None: + self._property_expiration_sec = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "expiration_sec", six.integer_types) + self._property_expiration_sec = value + + +class GetTaskTokenResponse(Response): + """ + Response of auth.get_task_token endpoint. 
+ + :param token: Token string + :type token: str + """ + _service = "auth" + _action = "get_task_token" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'token': {'description': 'Token string', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, token=None, **kwargs): + super(GetTaskTokenResponse, self).__init__(**kwargs) + self.token = token + + @schema_property('token') + def token(self): + return self._property_token + + @token.setter + def token(self, value): + if value is None: + self._property_token = None + return + + self.assert_isinstance(value, "token", six.string_types) + self._property_token = value + + +class LoginRequest(Request): + """ + Get a token based on supplied credentials (key/secret). + Intended for use by users with key/secret credentials that wish to obtain a token + for use with other services. Token will be limited by the same permissions that + exist for the credentials used in this call. + + :param expiration_sec: Requested token expiration time in seconds. Not + guaranteed, might be overridden by the service + :type expiration_sec: int + """ + + _service = "auth" + _action = "login" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'expiration_sec': { + 'description': 'Requested token expiration time in seconds. \n Not guaranteed, might be overridden by the service', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, expiration_sec=None, **kwargs): + super(LoginRequest, self).__init__(**kwargs) + self.expiration_sec = expiration_sec + + @schema_property('expiration_sec') + def expiration_sec(self): + return self._property_expiration_sec + + @expiration_sec.setter + def expiration_sec(self, value): + if value is None: + self._property_expiration_sec = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "expiration_sec", six.integer_types) + self._property_expiration_sec = value + + +class LoginResponse(Response): + """ + Response of auth.login endpoint. + + :param token: Token string + :type token: str + """ + _service = "auth" + _action = "login" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'token': {'description': 'Token string', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, token=None, **kwargs): + super(LoginResponse, self).__init__(**kwargs) + self.token = token + + @schema_property('token') + def token(self): + return self._property_token + + @token.setter + def token(self, value): + if value is None: + self._property_token = None + return + + self.assert_isinstance(value, "token", six.string_types) + self._property_token = value + + +class ReloadConfigRequest(Request): + """ + Reload auth configuration (currently supports blocking tokens). For user roles associated with a company (Admin, Superuser) this call will only affect company-related configuration. + + """ + + _service = "auth" + _action = "reload_config" + _version = "1.5" + _schema = {'definitions': {}, 'properties': {}, 'type': 'object'} + + +class ReloadConfigResponse(Response): + """ + Response of auth.reload_config endpoint. + + """ + _service = "auth" + _action = "reload_config" + _version = "1.5" + + _schema = {'definitions': {}, 'properties': {}, 'type': 'object'} + + +class RevokeCredentialsRequest(Request): + """ + Revokes (and deletes) a set (key, secret) of credentials for + the authenticated user. 
+ + :param access_key: Credentials key + :type access_key: str + """ + + _service = "auth" + _action = "revoke_credentials" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'access_key': { + 'description': 'Credentials key', + 'type': ['string', 'null'], + }, + }, + 'required': ['key_id'], + 'type': 'object', + } + def __init__( + self, access_key=None, **kwargs): + super(RevokeCredentialsRequest, self).__init__(**kwargs) + self.access_key = access_key + + @schema_property('access_key') + def access_key(self): + return self._property_access_key + + @access_key.setter + def access_key(self, value): + if value is None: + self._property_access_key = None + return + + self.assert_isinstance(value, "access_key", six.string_types) + self._property_access_key = value + + +class RevokeCredentialsResponse(Response): + """ + Response of auth.revoke_credentials endpoint. + + :param revoked: Number of credentials revoked + :type revoked: int + """ + _service = "auth" + _action = "revoke_credentials" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'revoked': { + 'description': 'Number of credentials revoked', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, revoked=None, **kwargs): + super(RevokeCredentialsResponse, self).__init__(**kwargs) + self.revoked = revoked + + @schema_property('revoked') + def revoked(self): + return self._property_revoked + + @revoked.setter + def revoked(self, value): + if value is None: + self._property_revoked = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "revoked", six.integer_types) + self._property_revoked = value + + +class SetCredentialsRequest(Request): + """ + Set a secret_key for a given access_key. Only supported in on-premises deployments + + :param access_key: Credentials key. Must be identical to the user's ID (this is + the only value supported in on-premises deployments) + :type access_key: str + :param secret_key: New secret key + :type secret_key: str + """ + + _service = "auth" + _action = "set_credentials" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'access_key': { + 'description': "Credentials key. Must be identical to the user's ID (this is the only value supported in on-premises deployments)", + 'type': 'string', + }, + 'secret_key': {'description': 'New secret key', 'type': 'string'}, + }, + 'required': ['access_key', 'secret_key'], + 'type': 'object', + } + def __init__( + self, access_key, secret_key, **kwargs): + super(SetCredentialsRequest, self).__init__(**kwargs) + self.access_key = access_key + self.secret_key = secret_key + + @schema_property('access_key') + def access_key(self): + return self._property_access_key + + @access_key.setter + def access_key(self, value): + if value is None: + self._property_access_key = None + return + + self.assert_isinstance(value, "access_key", six.string_types) + self._property_access_key = value + + @schema_property('secret_key') + def secret_key(self): + return self._property_secret_key + + @secret_key.setter + def secret_key(self, value): + if value is None: + self._property_secret_key = None + return + + self.assert_isinstance(value, "secret_key", six.string_types) + self._property_secret_key = value + + +class SetCredentialsResponse(Response): + """ + Response of auth.set_credentials endpoint. 
+ + :param set: True if secret was successfully set, False otherwise + :type set: bool + """ + _service = "auth" + _action = "set_credentials" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'set': { + 'description': 'True if secret was successfully set, False otherwise', + 'type': ['boolean', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, set=None, **kwargs): + super(SetCredentialsResponse, self).__init__(**kwargs) + self.set = set + + @schema_property('set') + def set(self): + return self._property_set + + @set.setter + def set(self, value): + if value is None: + self._property_set = None + return + + self.assert_isinstance(value, "set", (bool,)) + self._property_set = value + + +response_mapping = { + LoginRequest: LoginResponse, + GetTaskTokenRequest: GetTaskTokenResponse, + CreateCredentialsRequest: CreateCredentialsResponse, + GetCredentialsRequest: GetCredentialsResponse, + RevokeCredentialsRequest: RevokeCredentialsResponse, + SetCredentialsRequest: SetCredentialsResponse, + AddUserRequest: AddUserResponse, + DeleteUserRequest: DeleteUserResponse, + ReloadConfigRequest: ReloadConfigResponse, + EditUserRequest: EditUserResponse, +} diff --git a/trains/backend_api/services/v2_1/debug.py b/trains/backend_api/services/v2_1/debug.py new file mode 100644 index 00000000..c5839336 --- /dev/null +++ b/trains/backend_api/services/v2_1/debug.py @@ -0,0 +1,194 @@ +""" +debug service + +Debugging utilities +""" +import six +import types +from datetime import datetime +import enum + +from dateutil.parser import parse as parse_datetime + +from ....backend_api.session import Request, BatchRequest, Response, DataModel, NonStrictDataModel, CompoundRequest, schema_property, StringEnum + + +class ApiexRequest(Request): + """ + """ + + _service = "debug" + _action = "apiex" + _version = "1.5" + _schema = {'definitions': {}, 'properties': {}, 'required': [], 'type': 'object'} + + +class ApiexResponse(Response): + """ + Response of debug.apiex endpoint. + + """ + _service = "debug" + _action = "apiex" + _version = "1.5" + + _schema = {'definitions': {}, 'properties': {}, 'type': 'object'} + + +class EchoRequest(Request): + """ + Return request data + + """ + + _service = "debug" + _action = "echo" + _version = "1.5" + _schema = {'definitions': {}, 'properties': {}, 'type': 'object'} + + +class EchoResponse(Response): + """ + Response of debug.echo endpoint. + + """ + _service = "debug" + _action = "echo" + _version = "1.5" + + _schema = {'definitions': {}, 'properties': {}, 'type': 'object'} + + +class ExRequest(Request): + """ + """ + + _service = "debug" + _action = "ex" + _version = "1.5" + _schema = {'definitions': {}, 'properties': {}, 'required': [], 'type': 'object'} + + +class ExResponse(Response): + """ + Response of debug.ex endpoint. + + """ + _service = "debug" + _action = "ex" + _version = "1.5" + + _schema = {'definitions': {}, 'properties': {}, 'type': 'object'} + + +class PingRequest(Request): + """ + Return a message. Does not require authorization. + + """ + + _service = "debug" + _action = "ping" + _version = "1.5" + _schema = {'definitions': {}, 'properties': {}, 'type': 'object'} + + +class PingResponse(Response): + """ + Response of debug.ping endpoint. 
+ + :param msg: A friendly message + :type msg: str + """ + _service = "debug" + _action = "ping" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'msg': { + 'description': 'A friendly message', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, msg=None, **kwargs): + super(PingResponse, self).__init__(**kwargs) + self.msg = msg + + @schema_property('msg') + def msg(self): + return self._property_msg + + @msg.setter + def msg(self, value): + if value is None: + self._property_msg = None + return + + self.assert_isinstance(value, "msg", six.string_types) + self._property_msg = value + + +class PingAuthRequest(Request): + """ + Return a message. Requires authorization. + + """ + + _service = "debug" + _action = "ping_auth" + _version = "1.5" + _schema = {'definitions': {}, 'properties': {}, 'type': 'object'} + + +class PingAuthResponse(Response): + """ + Response of debug.ping_auth endpoint. + + :param msg: A friendly message + :type msg: str + """ + _service = "debug" + _action = "ping_auth" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'msg': { + 'description': 'A friendly message', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, msg=None, **kwargs): + super(PingAuthResponse, self).__init__(**kwargs) + self.msg = msg + + @schema_property('msg') + def msg(self): + return self._property_msg + + @msg.setter + def msg(self, value): + if value is None: + self._property_msg = None + return + + self.assert_isinstance(value, "msg", six.string_types) + self._property_msg = value + + +response_mapping = { + EchoRequest: EchoResponse, + PingRequest: PingResponse, + PingAuthRequest: PingAuthResponse, + ApiexRequest: ApiexResponse, + ExRequest: ExResponse, +} diff --git a/trains/backend_api/services/v2_1/events.py b/trains/backend_api/services/v2_1/events.py new file mode 100644 index 00000000..a466281a --- /dev/null +++ b/trains/backend_api/services/v2_1/events.py @@ -0,0 +1,2846 @@ +""" +events service + +Provides an API for running tasks to report events collected by the system. +""" +import six +import types +from datetime import datetime +import enum + +from dateutil.parser import parse as parse_datetime + +from ....backend_api.session import Request, BatchRequest, Response, DataModel, NonStrictDataModel, CompoundRequest, schema_property, StringEnum + + +class MetricsScalarEvent(NonStrictDataModel): + """ + Used for reporting scalar metrics during training task + + :param timestamp: Epoch milliseconds UTC, will be set by the server if not set. + :type timestamp: float + :param task: Task ID (required) + :type task: str + :param iter: Iteration + :type iter: int + :param metric: Metric name, e.g. 'count', 'loss', 'accuracy' + :type metric: str + :param variant: E.g. 'class_1', 'total', 'average + :type variant: str + :param value: + :type value: float + """ + _schema = { + 'description': 'Used for reporting scalar metrics during training task', + 'properties': { + 'iter': {'description': 'Iteration', 'type': 'integer'}, + 'metric': { + 'description': "Metric name, e.g. 
'count', 'loss', 'accuracy'", + 'type': 'string', + }, + 'task': {'description': 'Task ID (required)', 'type': 'string'}, + 'timestamp': { + 'description': 'Epoch milliseconds UTC, will be set by the server if not set.', + 'type': ['number', 'null'], + }, + 'type': { + 'const': 'training_stats_scalar', + 'description': 'training_stats_vector', + }, + 'value': {'description': '', 'type': 'number'}, + 'variant': { + 'description': "E.g. 'class_1', 'total', 'average", + 'type': 'string', + }, + }, + 'required': ['task', 'type'], + 'type': 'object', + } + def __init__( + self, task, timestamp=None, iter=None, metric=None, variant=None, value=None, **kwargs): + super(MetricsScalarEvent, self).__init__(**kwargs) + self.timestamp = timestamp + self.task = task + self.iter = iter + self.metric = metric + self.variant = variant + self.value = value + + @schema_property('timestamp') + def timestamp(self): + return self._property_timestamp + + @timestamp.setter + def timestamp(self, value): + if value is None: + self._property_timestamp = None + return + + self.assert_isinstance(value, "timestamp", six.integer_types + (float,)) + self._property_timestamp = value + + @schema_property('type') + def type(self): + return "training_stats_scalar" + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('iter') + def iter(self): + return self._property_iter + + @iter.setter + def iter(self, value): + if value is None: + self._property_iter = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "iter", six.integer_types) + self._property_iter = value + + @schema_property('metric') + def metric(self): + return self._property_metric + + @metric.setter + def metric(self, value): + if value is None: + self._property_metric = None + return + + self.assert_isinstance(value, "metric", six.string_types) + self._property_metric = value + + @schema_property('variant') + def variant(self): + return self._property_variant + + @variant.setter + def variant(self, value): + if value is None: + self._property_variant = None + return + + self.assert_isinstance(value, "variant", six.string_types) + self._property_variant = value + + @schema_property('value') + def value(self): + return self._property_value + + @value.setter + def value(self, value): + if value is None: + self._property_value = None + return + + self.assert_isinstance(value, "value", six.integer_types + (float,)) + self._property_value = value + + +class MetricsVectorEvent(NonStrictDataModel): + """ + Used for reporting vector metrics during training task + + :param timestamp: Epoch milliseconds UTC, will be set by the server if not set. + :type timestamp: float + :param task: Task ID (required) + :type task: str + :param iter: Iteration + :type iter: int + :param metric: Metric name, e.g. 'count', 'loss', 'accuracy' + :type metric: str + :param variant: E.g. 'class_1', 'total', 'average + :type variant: str + :param values: vector of float values + :type values: Sequence[float] + """ + _schema = { + 'description': 'Used for reporting vector metrics during training task', + 'properties': { + 'iter': {'description': 'Iteration', 'type': 'integer'}, + 'metric': { + 'description': "Metric name, e.g. 
'count', 'loss', 'accuracy'", + 'type': 'string', + }, + 'task': {'description': 'Task ID (required)', 'type': 'string'}, + 'timestamp': { + 'description': 'Epoch milliseconds UTC, will be set by the server if not set.', + 'type': ['number', 'null'], + }, + 'type': { + 'const': 'training_stats_vector', + 'description': 'training_stats_vector', + }, + 'values': { + 'description': 'vector of float values', + 'items': {'type': 'number'}, + 'type': 'array', + }, + 'variant': { + 'description': "E.g. 'class_1', 'total', 'average", + 'type': 'string', + }, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, timestamp=None, iter=None, metric=None, variant=None, values=None, **kwargs): + super(MetricsVectorEvent, self).__init__(**kwargs) + self.timestamp = timestamp + self.task = task + self.iter = iter + self.metric = metric + self.variant = variant + self.values = values + + @schema_property('timestamp') + def timestamp(self): + return self._property_timestamp + + @timestamp.setter + def timestamp(self, value): + if value is None: + self._property_timestamp = None + return + + self.assert_isinstance(value, "timestamp", six.integer_types + (float,)) + self._property_timestamp = value + + @schema_property('type') + def type(self): + return "training_stats_vector" + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('iter') + def iter(self): + return self._property_iter + + @iter.setter + def iter(self, value): + if value is None: + self._property_iter = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "iter", six.integer_types) + self._property_iter = value + + @schema_property('metric') + def metric(self): + return self._property_metric + + @metric.setter + def metric(self, value): + if value is None: + self._property_metric = None + return + + self.assert_isinstance(value, "metric", six.string_types) + self._property_metric = value + + @schema_property('variant') + def variant(self): + return self._property_variant + + @variant.setter + def variant(self, value): + if value is None: + self._property_variant = None + return + + self.assert_isinstance(value, "variant", six.string_types) + self._property_variant = value + + @schema_property('values') + def values(self): + return self._property_values + + @values.setter + def values(self, value): + if value is None: + self._property_values = None + return + + self.assert_isinstance(value, "values", (list, tuple)) + + self.assert_isinstance(value, "values", six.integer_types + (float,), is_array=True) + self._property_values = value + + +class MetricsImageEvent(NonStrictDataModel): + """ + An image or video was dumped to storage for debugging + + :param timestamp: Epoch milliseconds UTC, will be set by the server if not set. + :type timestamp: float + :param task: Task ID (required) + :type task: str + :param iter: Iteration + :type iter: int + :param metric: Metric name, e.g. 'count', 'loss', 'accuracy' + :type metric: str + :param variant: E.g. 
'class_1', 'total', 'average + :type variant: str + :param key: File key + :type key: str + :param url: File URL + :type url: str + """ + _schema = { + 'description': 'An image or video was dumped to storage for debugging', + 'properties': { + 'iter': {'description': 'Iteration', 'type': 'integer'}, + 'key': {'description': 'File key', 'type': 'string'}, + 'metric': { + 'description': "Metric name, e.g. 'count', 'loss', 'accuracy'", + 'type': 'string', + }, + 'task': {'description': 'Task ID (required)', 'type': 'string'}, + 'timestamp': { + 'description': 'Epoch milliseconds UTC, will be set by the server if not set.', + 'type': ['number', 'null'], + }, + 'type': {'const': 'training_debug_image', 'description': ''}, + 'url': {'description': 'File URL', 'type': 'string'}, + 'variant': { + 'description': "E.g. 'class_1', 'total', 'average", + 'type': 'string', + }, + }, + 'required': ['task', 'type'], + 'type': 'object', + } + def __init__( + self, task, timestamp=None, iter=None, metric=None, variant=None, key=None, url=None, **kwargs): + super(MetricsImageEvent, self).__init__(**kwargs) + self.timestamp = timestamp + self.task = task + self.iter = iter + self.metric = metric + self.variant = variant + self.key = key + self.url = url + + @schema_property('timestamp') + def timestamp(self): + return self._property_timestamp + + @timestamp.setter + def timestamp(self, value): + if value is None: + self._property_timestamp = None + return + + self.assert_isinstance(value, "timestamp", six.integer_types + (float,)) + self._property_timestamp = value + + @schema_property('type') + def type(self): + return "training_debug_image" + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('iter') + def iter(self): + return self._property_iter + + @iter.setter + def iter(self, value): + if value is None: + self._property_iter = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "iter", six.integer_types) + self._property_iter = value + + @schema_property('metric') + def metric(self): + return self._property_metric + + @metric.setter + def metric(self, value): + if value is None: + self._property_metric = None + return + + self.assert_isinstance(value, "metric", six.string_types) + self._property_metric = value + + @schema_property('variant') + def variant(self): + return self._property_variant + + @variant.setter + def variant(self, value): + if value is None: + self._property_variant = None + return + + self.assert_isinstance(value, "variant", six.string_types) + self._property_variant = value + + @schema_property('key') + def key(self): + return self._property_key + + @key.setter + def key(self, value): + if value is None: + self._property_key = None + return + + self.assert_isinstance(value, "key", six.string_types) + self._property_key = value + + @schema_property('url') + def url(self): + return self._property_url + + @url.setter + def url(self, value): + if value is None: + self._property_url = None + return + + self.assert_isinstance(value, "url", six.string_types) + self._property_url = value + + +class MetricsPlotEvent(NonStrictDataModel): + """ + An entire plot (not single datapoint) and it's layout. + Used for plotting ROC curves, confidence matrices, etc. when evaluating the net. 
+ + :param timestamp: Epoch milliseconds UTC, will be set by the server if not set. + :type timestamp: float + :param task: Task ID (required) + :type task: str + :param iter: Iteration + :type iter: int + :param metric: Metric name, e.g. 'count', 'loss', 'accuracy' + :type metric: str + :param variant: E.g. 'class_1', 'total', 'average + :type variant: str + :param plot_str: An entire plot (not single datapoint) and it's layout. Used + for plotting ROC curves, confidence matrices, etc. when evaluating the net. + :type plot_str: str + """ + _schema = { + 'description': " An entire plot (not single datapoint) and it's layout.\n Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.", + 'properties': { + 'iter': {'description': 'Iteration', 'type': 'integer'}, + 'metric': { + 'description': "Metric name, e.g. 'count', 'loss', 'accuracy'", + 'type': 'string', + }, + 'plot_str': { + 'description': "An entire plot (not single datapoint) and it's layout.\n Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.\n ", + 'type': 'string', + }, + 'task': {'description': 'Task ID (required)', 'type': 'string'}, + 'timestamp': { + 'description': 'Epoch milliseconds UTC, will be set by the server if not set.', + 'type': ['number', 'null'], + }, + 'type': {'const': 'plot', 'description': "'plot'"}, + 'variant': { + 'description': "E.g. 'class_1', 'total', 'average", + 'type': 'string', + }, + }, + 'required': ['task', 'type'], + 'type': 'object', + } + def __init__( + self, task, timestamp=None, iter=None, metric=None, variant=None, plot_str=None, **kwargs): + super(MetricsPlotEvent, self).__init__(**kwargs) + self.timestamp = timestamp + self.task = task + self.iter = iter + self.metric = metric + self.variant = variant + self.plot_str = plot_str + + @schema_property('timestamp') + def timestamp(self): + return self._property_timestamp + + @timestamp.setter + def timestamp(self, value): + if value is None: + self._property_timestamp = None + return + + self.assert_isinstance(value, "timestamp", six.integer_types + (float,)) + self._property_timestamp = value + + @schema_property('type') + def type(self): + return "plot" + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('iter') + def iter(self): + return self._property_iter + + @iter.setter + def iter(self, value): + if value is None: + self._property_iter = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "iter", six.integer_types) + self._property_iter = value + + @schema_property('metric') + def metric(self): + return self._property_metric + + @metric.setter + def metric(self, value): + if value is None: + self._property_metric = None + return + + self.assert_isinstance(value, "metric", six.string_types) + self._property_metric = value + + @schema_property('variant') + def variant(self): + return self._property_variant + + @variant.setter + def variant(self, value): + if value is None: + self._property_variant = None + return + + self.assert_isinstance(value, "variant", six.string_types) + self._property_variant = value + + @schema_property('plot_str') + def plot_str(self): + return self._property_plot_str + + @plot_str.setter + def plot_str(self, value): + if value is None: + 
self._property_plot_str = None + return + + self.assert_isinstance(value, "plot_str", six.string_types) + self._property_plot_str = value + + +class LogLevelEnum(StringEnum): + notset = "notset" + debug = "debug" + verbose = "verbose" + info = "info" + warn = "warn" + warning = "warning" + error = "error" + fatal = "fatal" + critical = "critical" + + +class TaskLogEvent(NonStrictDataModel): + """ + A log event associated with a task. + + :param timestamp: Epoch milliseconds UTC, will be set by the server if not set. + :type timestamp: float + :param task: Task ID (required) + :type task: str + :param level: Log level. + :type level: LogLevelEnum + :param worker: Name of machine running the task. + :type worker: str + :param msg: Log message. + :type msg: str + """ + _schema = { + 'description': 'A log event associated with a task.', + 'properties': { + 'level': { + '$ref': '#/definitions/log_level_enum', + 'description': 'Log level.', + }, + 'msg': {'description': 'Log message.', 'type': 'string'}, + 'task': {'description': 'Task ID (required)', 'type': 'string'}, + 'timestamp': { + 'description': 'Epoch milliseconds UTC, will be set by the server if not set.', + 'type': ['number', 'null'], + }, + 'type': {'const': 'log', 'description': "'log'"}, + 'worker': { + 'description': 'Name of machine running the task.', + 'type': 'string', + }, + }, + 'required': ['task', 'type'], + 'type': 'object', + } + def __init__( + self, task, timestamp=None, level=None, worker=None, msg=None, **kwargs): + super(TaskLogEvent, self).__init__(**kwargs) + self.timestamp = timestamp + self.task = task + self.level = level + self.worker = worker + self.msg = msg + + @schema_property('timestamp') + def timestamp(self): + return self._property_timestamp + + @timestamp.setter + def timestamp(self, value): + if value is None: + self._property_timestamp = None + return + + self.assert_isinstance(value, "timestamp", six.integer_types + (float,)) + self._property_timestamp = value + + @schema_property('type') + def type(self): + return "log" + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('level') + def level(self): + return self._property_level + + @level.setter + def level(self, value): + if value is None: + self._property_level = None + return + if isinstance(value, six.string_types): + try: + value = LogLevelEnum(value) + except ValueError: + pass + else: + self.assert_isinstance(value, "level", enum.Enum) + self._property_level = value + + @schema_property('worker') + def worker(self): + return self._property_worker + + @worker.setter + def worker(self, value): + if value is None: + self._property_worker = None + return + + self.assert_isinstance(value, "worker", six.string_types) + self._property_worker = value + + @schema_property('msg') + def msg(self): + return self._property_msg + + @msg.setter + def msg(self, value): + if value is None: + self._property_msg = None + return + + self.assert_isinstance(value, "msg", six.string_types) + self._property_msg = value + + +class AddRequest(CompoundRequest): + """ + Adds a single event + + """ + + _service = "events" + _action = "add" + _version = "1.5" + _item_prop_name = "event" + _schema = { + 'anyOf': [ + {'$ref': '#/definitions/metrics_scalar_event'}, + {'$ref': '#/definitions/metrics_vector_event'}, + {'$ref': 
'#/definitions/metrics_image_event'}, + {'$ref': '#/definitions/metrics_plot_event'}, + {'$ref': '#/definitions/task_log_event'}, + ], + 'definitions': { + 'log_level_enum': { + 'enum': [ + 'notset', + 'debug', + 'verbose', + 'info', + 'warn', + 'warning', + 'error', + 'fatal', + 'critical', + ], + 'type': 'string', + }, + 'metrics_image_event': { + 'description': 'An image or video was dumped to storage for debugging', + 'properties': { + 'iter': {'description': 'Iteration', 'type': 'integer'}, + 'key': {'description': 'File key', 'type': 'string'}, + 'metric': { + 'description': "Metric name, e.g. 'count', 'loss', 'accuracy'", + 'type': 'string', + }, + 'task': { + 'description': 'Task ID (required)', + 'type': 'string', + }, + 'timestamp': { + 'description': 'Epoch milliseconds UTC, will be set by the server if not set.', + 'type': ['number', 'null'], + }, + 'type': {'const': 'training_debug_image', 'description': ''}, + 'url': {'description': 'File URL', 'type': 'string'}, + 'variant': { + 'description': "E.g. 'class_1', 'total', 'average", + 'type': 'string', + }, + }, + 'required': ['task', 'type'], + 'type': 'object', + }, + 'metrics_plot_event': { + 'description': " An entire plot (not single datapoint) and it's layout.\n Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.", + 'properties': { + 'iter': {'description': 'Iteration', 'type': 'integer'}, + 'metric': { + 'description': "Metric name, e.g. 'count', 'loss', 'accuracy'", + 'type': 'string', + }, + 'plot_str': { + 'description': "An entire plot (not single datapoint) and it's layout.\n Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.\n ", + 'type': 'string', + }, + 'task': { + 'description': 'Task ID (required)', + 'type': 'string', + }, + 'timestamp': { + 'description': 'Epoch milliseconds UTC, will be set by the server if not set.', + 'type': ['number', 'null'], + }, + 'type': {'const': 'plot', 'description': "'plot'"}, + 'variant': { + 'description': "E.g. 'class_1', 'total', 'average", + 'type': 'string', + }, + }, + 'required': ['task', 'type'], + 'type': 'object', + }, + 'metrics_scalar_event': { + 'description': 'Used for reporting scalar metrics during training task', + 'properties': { + 'iter': {'description': 'Iteration', 'type': 'integer'}, + 'metric': { + 'description': "Metric name, e.g. 'count', 'loss', 'accuracy'", + 'type': 'string', + }, + 'task': { + 'description': 'Task ID (required)', + 'type': 'string', + }, + 'timestamp': { + 'description': 'Epoch milliseconds UTC, will be set by the server if not set.', + 'type': ['number', 'null'], + }, + 'type': { + 'const': 'training_stats_scalar', + 'description': 'training_stats_vector', + }, + 'value': {'description': '', 'type': 'number'}, + 'variant': { + 'description': "E.g. 'class_1', 'total', 'average", + 'type': 'string', + }, + }, + 'required': ['task', 'type'], + 'type': 'object', + }, + 'metrics_vector_event': { + 'description': 'Used for reporting vector metrics during training task', + 'properties': { + 'iter': {'description': 'Iteration', 'type': 'integer'}, + 'metric': { + 'description': "Metric name, e.g. 
'count', 'loss', 'accuracy'", + 'type': 'string', + }, + 'task': { + 'description': 'Task ID (required)', + 'type': 'string', + }, + 'timestamp': { + 'description': 'Epoch milliseconds UTC, will be set by the server if not set.', + 'type': ['number', 'null'], + }, + 'type': { + 'const': 'training_stats_vector', + 'description': 'training_stats_vector', + }, + 'values': { + 'description': 'vector of float values', + 'items': {'type': 'number'}, + 'type': 'array', + }, + 'variant': { + 'description': "E.g. 'class_1', 'total', 'average", + 'type': 'string', + }, + }, + 'required': ['task'], + 'type': 'object', + }, + 'task_log_event': { + 'description': 'A log event associated with a task.', + 'properties': { + 'level': { + '$ref': '#/definitions/log_level_enum', + 'description': 'Log level.', + }, + 'msg': {'description': 'Log message.', 'type': 'string'}, + 'task': { + 'description': 'Task ID (required)', + 'type': 'string', + }, + 'timestamp': { + 'description': 'Epoch milliseconds UTC, will be set by the server if not set.', + 'type': ['number', 'null'], + }, + 'type': {'const': 'log', 'description': "'log'"}, + 'worker': { + 'description': 'Name of machine running the task.', + 'type': 'string', + }, + }, + 'required': ['task', 'type'], + 'type': 'object', + }, + }, + 'type': 'object', + } + def __init__(self, event): + super(AddRequest, self).__init__() + self.event = event + + @property + def event(self): + return self._property_event + + @event.setter + def event(self, value): + self.assert_isinstance(value, "event", (MetricsScalarEvent, MetricsVectorEvent, MetricsImageEvent, MetricsPlotEvent, TaskLogEvent)) + self._property_event = value + + +class AddResponse(Response): + """ + Response of events.add endpoint. + + """ + _service = "events" + _action = "add" + _version = "1.5" + + _schema = {'additionalProperties': True, 'definitions': {}, 'type': 'object'} + + +class AddBatchRequest(BatchRequest): + """ + Adds a batch of events in a single call. + + """ + + _service = "events" + _action = "add_batch" + _version = "1.5" + _batched_request_cls = AddRequest + + +class AddBatchResponse(Response): + """ + Response of events.add_batch endpoint. 
+ + :param added: + :type added: int + :param errors: + :type errors: int + """ + _service = "events" + _action = "add_batch" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'added': {'type': ['integer', 'null']}, + 'errors': {'type': ['integer', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, added=None, errors=None, **kwargs): + super(AddBatchResponse, self).__init__(**kwargs) + self.added = added + self.errors = errors + + @schema_property('added') + def added(self): + return self._property_added + + @added.setter + def added(self, value): + if value is None: + self._property_added = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "added", six.integer_types) + self._property_added = value + + @schema_property('errors') + def errors(self): + return self._property_errors + + @errors.setter + def errors(self, value): + if value is None: + self._property_errors = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "errors", six.integer_types) + self._property_errors = value + + +class DebugImagesRequest(Request): + """ + Get all debug images of a task + + :param task: Task ID + :type task: str + :param iters: Max number of latest iterations for which to return debug images + :type iters: int + :param scroll_id: Scroll ID of previous call (used for getting more results) + :type scroll_id: str + """ + + _service = "events" + _action = "debug_images" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'iters': { + 'description': 'Max number of latest iterations for which to return debug images', + 'type': 'integer', + }, + 'scroll_id': { + 'description': 'Scroll ID of previous call (used for getting more results)', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, iters=None, scroll_id=None, **kwargs): + super(DebugImagesRequest, self).__init__(**kwargs) + self.task = task + self.iters = iters + self.scroll_id = scroll_id + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('iters') + def iters(self): + return self._property_iters + + @iters.setter + def iters(self, value): + if value is None: + self._property_iters = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "iters", six.integer_types) + self._property_iters = value + + @schema_property('scroll_id') + def scroll_id(self): + return self._property_scroll_id + + @scroll_id.setter + def scroll_id(self, value): + if value is None: + self._property_scroll_id = None + return + + self.assert_isinstance(value, "scroll_id", six.string_types) + self._property_scroll_id = value + + +class DebugImagesResponse(Response): + """ + Response of events.debug_images endpoint. 
+ + :param task: Task ID + :type task: str + :param images: Images list + :type images: Sequence[dict] + :param returned: Number of results returned + :type returned: int + :param total: Total number of results available for this query + :type total: float + :param scroll_id: Scroll ID for getting more results + :type scroll_id: str + """ + _service = "events" + _action = "debug_images" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'images': { + 'description': 'Images list', + 'items': {'type': 'object'}, + 'type': ['array', 'null'], + }, + 'returned': { + 'description': 'Number of results returned', + 'type': ['integer', 'null'], + }, + 'scroll_id': { + 'description': 'Scroll ID for getting more results', + 'type': ['string', 'null'], + }, + 'task': {'description': 'Task ID', 'type': ['string', 'null']}, + 'total': { + 'description': 'Total number of results available for this query', + 'type': ['number', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, task=None, images=None, returned=None, total=None, scroll_id=None, **kwargs): + super(DebugImagesResponse, self).__init__(**kwargs) + self.task = task + self.images = images + self.returned = returned + self.total = total + self.scroll_id = scroll_id + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('images') + def images(self): + return self._property_images + + @images.setter + def images(self, value): + if value is None: + self._property_images = None + return + + self.assert_isinstance(value, "images", (list, tuple)) + + self.assert_isinstance(value, "images", (dict,), is_array=True) + self._property_images = value + + @schema_property('returned') + def returned(self): + return self._property_returned + + @returned.setter + def returned(self, value): + if value is None: + self._property_returned = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "returned", six.integer_types) + self._property_returned = value + + @schema_property('total') + def total(self): + return self._property_total + + @total.setter + def total(self, value): + if value is None: + self._property_total = None + return + + self.assert_isinstance(value, "total", six.integer_types + (float,)) + self._property_total = value + + @schema_property('scroll_id') + def scroll_id(self): + return self._property_scroll_id + + @scroll_id.setter + def scroll_id(self, value): + if value is None: + self._property_scroll_id = None + return + + self.assert_isinstance(value, "scroll_id", six.string_types) + self._property_scroll_id = value + + +class DeleteForTaskRequest(Request): + """ + Delete all task event. 
*This cannot be undone!* + + :param task: Task ID + :type task: str + """ + + _service = "events" + _action = "delete_for_task" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': {'task': {'description': 'Task ID', 'type': 'string'}}, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, **kwargs): + super(DeleteForTaskRequest, self).__init__(**kwargs) + self.task = task + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + +class DeleteForTaskResponse(Response): + """ + Response of events.delete_for_task endpoint. + + :param deleted: Number of deleted events + :type deleted: bool + """ + _service = "events" + _action = "delete_for_task" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'deleted': { + 'description': 'Number of deleted events', + 'type': ['boolean', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, deleted=None, **kwargs): + super(DeleteForTaskResponse, self).__init__(**kwargs) + self.deleted = deleted + + @schema_property('deleted') + def deleted(self): + return self._property_deleted + + @deleted.setter + def deleted(self, value): + if value is None: + self._property_deleted = None + return + + self.assert_isinstance(value, "deleted", (bool,)) + self._property_deleted = value + + +class DownloadTaskLogRequest(Request): + """ + Get an attachment containing the task's log + + :param task: Task ID + :type task: str + :param line_type: Line format type + :type line_type: str + :param line_format: Line string format. Used if the line type is 'text' + :type line_format: str + """ + + _service = "events" + _action = "download_task_log" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'line_format': { + 'default': '{asctime} {worker} {level} {msg}', + 'description': "Line string format. 
Used if the line type is 'text'", + 'type': 'string', + }, + 'line_type': { + 'description': 'Line format type', + 'enum': ['json', 'text'], + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, line_type=None, line_format="{asctime} {worker} {level} {msg}", **kwargs): + super(DownloadTaskLogRequest, self).__init__(**kwargs) + self.task = task + self.line_type = line_type + self.line_format = line_format + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('line_type') + def line_type(self): + return self._property_line_type + + @line_type.setter + def line_type(self, value): + if value is None: + self._property_line_type = None + return + + self.assert_isinstance(value, "line_type", six.string_types) + self._property_line_type = value + + @schema_property('line_format') + def line_format(self): + return self._property_line_format + + @line_format.setter + def line_format(self, value): + if value is None: + self._property_line_format = None + return + + self.assert_isinstance(value, "line_format", six.string_types) + self._property_line_format = value + + +class DownloadTaskLogResponse(Response): + """ + Response of events.download_task_log endpoint. + + """ + _service = "events" + _action = "download_task_log" + _version = "1.5" + + _schema = {'definitions': {}, 'type': 'string'} + + +class GetMultiTaskPlotsRequest(Request): + """ + Get 'plot' events for the given tasks + + :param tasks: List of task IDs + :type tasks: Sequence[str] + :param iters: Max number of latest iterations for which to return debug images + :type iters: int + :param scroll_id: Scroll ID of previous call (used for getting more results) + :type scroll_id: str + """ + + _service = "events" + _action = "get_multi_task_plots" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'iters': { + 'description': 'Max number of latest iterations for which to return debug images', + 'type': 'integer', + }, + 'scroll_id': { + 'description': 'Scroll ID of previous call (used for getting more results)', + 'type': 'string', + }, + 'tasks': { + 'description': 'List of task IDs', + 'items': {'description': 'Task ID', 'type': 'string'}, + 'type': 'array', + }, + }, + 'required': ['tasks'], + 'type': 'object', + } + def __init__( + self, tasks, iters=None, scroll_id=None, **kwargs): + super(GetMultiTaskPlotsRequest, self).__init__(**kwargs) + self.tasks = tasks + self.iters = iters + self.scroll_id = scroll_id + + @schema_property('tasks') + def tasks(self): + return self._property_tasks + + @tasks.setter + def tasks(self, value): + if value is None: + self._property_tasks = None + return + + self.assert_isinstance(value, "tasks", (list, tuple)) + + self.assert_isinstance(value, "tasks", six.string_types, is_array=True) + self._property_tasks = value + + @schema_property('iters') + def iters(self): + return self._property_iters + + @iters.setter + def iters(self, value): + if value is None: + self._property_iters = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "iters", six.integer_types) + self._property_iters = value + + @schema_property('scroll_id') + def scroll_id(self): + return 
self._property_scroll_id + + @scroll_id.setter + def scroll_id(self, value): + if value is None: + self._property_scroll_id = None + return + + self.assert_isinstance(value, "scroll_id", six.string_types) + self._property_scroll_id = value + + +class GetMultiTaskPlotsResponse(Response): + """ + Response of events.get_multi_task_plots endpoint. + + :param plots: Plots mapping (keyed by task name) + :type plots: dict + :param returned: Number of results returned + :type returned: int + :param total: Total number of results available for this query + :type total: float + :param scroll_id: Scroll ID for getting more results + :type scroll_id: str + """ + _service = "events" + _action = "get_multi_task_plots" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'plots': { + 'description': 'Plots mapping (keyed by task name)', + 'type': ['object', 'null'], + }, + 'returned': { + 'description': 'Number of results returned', + 'type': ['integer', 'null'], + }, + 'scroll_id': { + 'description': 'Scroll ID for getting more results', + 'type': ['string', 'null'], + }, + 'total': { + 'description': 'Total number of results available for this query', + 'type': ['number', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, plots=None, returned=None, total=None, scroll_id=None, **kwargs): + super(GetMultiTaskPlotsResponse, self).__init__(**kwargs) + self.plots = plots + self.returned = returned + self.total = total + self.scroll_id = scroll_id + + @schema_property('plots') + def plots(self): + return self._property_plots + + @plots.setter + def plots(self, value): + if value is None: + self._property_plots = None + return + + self.assert_isinstance(value, "plots", (dict,)) + self._property_plots = value + + @schema_property('returned') + def returned(self): + return self._property_returned + + @returned.setter + def returned(self, value): + if value is None: + self._property_returned = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "returned", six.integer_types) + self._property_returned = value + + @schema_property('total') + def total(self): + return self._property_total + + @total.setter + def total(self, value): + if value is None: + self._property_total = None + return + + self.assert_isinstance(value, "total", six.integer_types + (float,)) + self._property_total = value + + @schema_property('scroll_id') + def scroll_id(self): + return self._property_scroll_id + + @scroll_id.setter + def scroll_id(self, value): + if value is None: + self._property_scroll_id = None + return + + self.assert_isinstance(value, "scroll_id", six.string_types) + self._property_scroll_id = value + + +class GetScalarMetricDataRequest(Request): + """ + get scalar metric data for task + + :param task: task ID + :type task: str + :param metric: type of metric + :type metric: str + """ + + _service = "events" + _action = "get_scalar_metric_data" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'metric': {'description': 'type of metric', 'type': ['string', 'null']}, + 'task': {'description': 'task ID', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, task=None, metric=None, **kwargs): + super(GetScalarMetricDataRequest, self).__init__(**kwargs) + self.task = task + self.metric = metric + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + 
+ self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('metric') + def metric(self): + return self._property_metric + + @metric.setter + def metric(self, value): + if value is None: + self._property_metric = None + return + + self.assert_isinstance(value, "metric", six.string_types) + self._property_metric = value + + +class GetScalarMetricDataResponse(Response): + """ + Response of events.get_scalar_metric_data endpoint. + + :param events: task scalar metric events + :type events: Sequence[dict] + :param returned: amount of events returned + :type returned: int + :param total: amount of events in task + :type total: int + :param scroll_id: Scroll ID of previous call (used for getting more results) + :type scroll_id: str + """ + _service = "events" + _action = "get_scalar_metric_data" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'events': { + 'description': 'task scalar metric events', + 'items': {'type': 'object'}, + 'type': ['array', 'null'], + }, + 'returned': { + 'description': 'amount of events returned', + 'type': ['integer', 'null'], + }, + 'scroll_id': { + 'description': 'Scroll ID of previous call (used for getting more results)', + 'type': ['string', 'null'], + }, + 'total': { + 'description': 'amount of events in task', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, events=None, returned=None, total=None, scroll_id=None, **kwargs): + super(GetScalarMetricDataResponse, self).__init__(**kwargs) + self.events = events + self.returned = returned + self.total = total + self.scroll_id = scroll_id + + @schema_property('events') + def events(self): + return self._property_events + + @events.setter + def events(self, value): + if value is None: + self._property_events = None + return + + self.assert_isinstance(value, "events", (list, tuple)) + + self.assert_isinstance(value, "events", (dict,), is_array=True) + self._property_events = value + + @schema_property('returned') + def returned(self): + return self._property_returned + + @returned.setter + def returned(self, value): + if value is None: + self._property_returned = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "returned", six.integer_types) + self._property_returned = value + + @schema_property('total') + def total(self): + return self._property_total + + @total.setter + def total(self, value): + if value is None: + self._property_total = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "total", six.integer_types) + self._property_total = value + + @schema_property('scroll_id') + def scroll_id(self): + return self._property_scroll_id + + @scroll_id.setter + def scroll_id(self, value): + if value is None: + self._property_scroll_id = None + return + + self.assert_isinstance(value, "scroll_id", six.string_types) + self._property_scroll_id = value + + +class GetScalarMetricsAndVariantsRequest(Request): + """ + get task scalar metrics and variants + + :param task: task ID + :type task: str + """ + + _service = "events" + _action = "get_scalar_metrics_and_variants" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': {'task': {'description': 'task ID', 'type': 'string'}}, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, **kwargs): + super(GetScalarMetricsAndVariantsRequest, self).__init__(**kwargs) + self.task = task 
+ + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + +class GetScalarMetricsAndVariantsResponse(Response): + """ + Response of events.get_scalar_metrics_and_variants endpoint. + + :param metrics: + :type metrics: dict + """ + _service = "events" + _action = "get_scalar_metrics_and_variants" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'metrics': {'additionalProperties': True, 'type': ['object', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, metrics=None, **kwargs): + super(GetScalarMetricsAndVariantsResponse, self).__init__(**kwargs) + self.metrics = metrics + + @schema_property('metrics') + def metrics(self): + return self._property_metrics + + @metrics.setter + def metrics(self, value): + if value is None: + self._property_metrics = None + return + + self.assert_isinstance(value, "metrics", (dict,)) + self._property_metrics = value + + +class GetTaskEventsRequest(Request): + """ + Scroll through task events, sorted by timestamp + + :param task: Task ID + :type task: str + :param order: 'asc' (default) or 'desc'. + :type order: str + :param scroll_id: Pass this value on next call to get next page + :type scroll_id: str + :param batch_size: Number of events to return each time + :type batch_size: int + """ + + _service = "events" + _action = "get_task_events" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'batch_size': { + 'description': 'Number of events to return each time', + 'type': 'integer', + }, + 'order': { + 'description': "'asc' (default) or 'desc'.", + 'enum': ['asc', 'desc'], + 'type': 'string', + }, + 'scroll_id': { + 'description': 'Pass this value on next call to get next page', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, order=None, scroll_id=None, batch_size=None, **kwargs): + super(GetTaskEventsRequest, self).__init__(**kwargs) + self.task = task + self.order = order + self.scroll_id = scroll_id + self.batch_size = batch_size + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('order') + def order(self): + return self._property_order + + @order.setter + def order(self, value): + if value is None: + self._property_order = None + return + + self.assert_isinstance(value, "order", six.string_types) + self._property_order = value + + @schema_property('scroll_id') + def scroll_id(self): + return self._property_scroll_id + + @scroll_id.setter + def scroll_id(self, value): + if value is None: + self._property_scroll_id = None + return + + self.assert_isinstance(value, "scroll_id", six.string_types) + self._property_scroll_id = value + + @schema_property('batch_size') + def batch_size(self): + return self._property_batch_size + + @batch_size.setter + def batch_size(self, value): + if value is None: + self._property_batch_size = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "batch_size", six.integer_types) + self._property_batch_size = value + + +class 
GetTaskEventsResponse(Response): + """ + Response of events.get_task_events endpoint. + + :param events: Events list + :type events: Sequence[dict] + :param returned: Number of results returned + :type returned: int + :param total: Total number of results available for this query + :type total: float + :param scroll_id: Scroll ID for getting more results + :type scroll_id: str + """ + _service = "events" + _action = "get_task_events" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'events': { + 'description': 'Events list', + 'items': {'type': 'object'}, + 'type': ['array', 'null'], + }, + 'returned': { + 'description': 'Number of results returned', + 'type': ['integer', 'null'], + }, + 'scroll_id': { + 'description': 'Scroll ID for getting more results', + 'type': ['string', 'null'], + }, + 'total': { + 'description': 'Total number of results available for this query', + 'type': ['number', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, events=None, returned=None, total=None, scroll_id=None, **kwargs): + super(GetTaskEventsResponse, self).__init__(**kwargs) + self.events = events + self.returned = returned + self.total = total + self.scroll_id = scroll_id + + @schema_property('events') + def events(self): + return self._property_events + + @events.setter + def events(self, value): + if value is None: + self._property_events = None + return + + self.assert_isinstance(value, "events", (list, tuple)) + + self.assert_isinstance(value, "events", (dict,), is_array=True) + self._property_events = value + + @schema_property('returned') + def returned(self): + return self._property_returned + + @returned.setter + def returned(self, value): + if value is None: + self._property_returned = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "returned", six.integer_types) + self._property_returned = value + + @schema_property('total') + def total(self): + return self._property_total + + @total.setter + def total(self, value): + if value is None: + self._property_total = None + return + + self.assert_isinstance(value, "total", six.integer_types + (float,)) + self._property_total = value + + @schema_property('scroll_id') + def scroll_id(self): + return self._property_scroll_id + + @scroll_id.setter + def scroll_id(self, value): + if value is None: + self._property_scroll_id = None + return + + self.assert_isinstance(value, "scroll_id", six.string_types) + self._property_scroll_id = value + + +class GetTaskLatestScalarValuesRequest(Request): + """ + Get the tasks's latest scalar values + + :param task: Task ID + :type task: str + """ + + _service = "events" + _action = "get_task_latest_scalar_values" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': {'task': {'description': 'Task ID', 'type': 'string'}}, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, **kwargs): + super(GetTaskLatestScalarValuesRequest, self).__init__(**kwargs) + self.task = task + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + +class GetTaskLatestScalarValuesResponse(Response): + """ + Response of events.get_task_latest_scalar_values endpoint. 
+ + :param metrics: + :type metrics: Sequence[dict] + """ + _service = "events" + _action = "get_task_latest_scalar_values" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'metrics': { + 'items': { + 'properties': { + 'name': {'description': 'Metric name', 'type': 'string'}, + 'variants': { + 'items': { + 'properties': { + 'last_100_value': { + 'description': 'Average of 100 last reported values', + 'type': 'number', + }, + 'last_value': { + 'description': 'Last reported value', + 'type': 'number', + }, + 'name': { + 'description': 'Variant name', + 'type': 'string', + }, + }, + 'type': 'object', + }, + 'type': 'array', + }, + }, + 'type': 'object', + }, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, metrics=None, **kwargs): + super(GetTaskLatestScalarValuesResponse, self).__init__(**kwargs) + self.metrics = metrics + + @schema_property('metrics') + def metrics(self): + return self._property_metrics + + @metrics.setter + def metrics(self, value): + if value is None: + self._property_metrics = None + return + + self.assert_isinstance(value, "metrics", (list, tuple)) + + self.assert_isinstance(value, "metrics", (dict,), is_array=True) + self._property_metrics = value + + +class GetTaskLogRequest(Request): + """ + Get all 'log' events for this task + + :param task: Task ID + :type task: str + :param order: Timestamp order in which log events will be returned (defaults to + ascending) + :type order: str + :param from: Where will the log entries be taken from (default to the head of + the log) + :type from: str + :param scroll_id: + :type scroll_id: str + :param batch_size: + :type batch_size: int + """ + + _service = "events" + _action = "get_task_log" + _version = "1.7" + _schema = { + 'definitions': {}, + 'properties': { + 'batch_size': {'description': '', 'type': 'integer'}, + 'from': { + 'description': 'Where will the log entries be taken from (default to the head of the log)', + 'enum': ['head', 'tail'], + 'type': 'string', + }, + 'order': { + 'description': 'Timestamp order in which log events will be returned (defaults to ascending)', + 'enum': ['asc', 'desc'], + 'type': 'string', + }, + 'scroll_id': {'description': '', 'type': 'string'}, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, order=None, from_=None, scroll_id=None, batch_size=None, **kwargs): + super(GetTaskLogRequest, self).__init__(**kwargs) + self.task = task + self.order = order + self.from_ = from_ + self.scroll_id = scroll_id + self.batch_size = batch_size + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('order') + def order(self): + return self._property_order + + @order.setter + def order(self, value): + if value is None: + self._property_order = None + return + + self.assert_isinstance(value, "order", six.string_types) + self._property_order = value + + @schema_property('from') + def from_(self): + return self._property_from_ + + @from_.setter + def from_(self, value): + if value is None: + self._property_from_ = None + return + + self.assert_isinstance(value, "from_", six.string_types) + self._property_from_ = value + + @schema_property('scroll_id') + def scroll_id(self): + return self._property_scroll_id + + @scroll_id.setter 
+ def scroll_id(self, value): + if value is None: + self._property_scroll_id = None + return + + self.assert_isinstance(value, "scroll_id", six.string_types) + self._property_scroll_id = value + + @schema_property('batch_size') + def batch_size(self): + return self._property_batch_size + + @batch_size.setter + def batch_size(self, value): + if value is None: + self._property_batch_size = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "batch_size", six.integer_types) + self._property_batch_size = value + + +class GetTaskLogResponse(Response): + """ + Response of events.get_task_log endpoint. + + :param events: Log items list + :type events: Sequence[dict] + :param returned: Number of results returned + :type returned: int + :param total: Total number of results available for this query + :type total: float + :param scroll_id: Scroll ID for getting more results + :type scroll_id: str + """ + _service = "events" + _action = "get_task_log" + _version = "1.7" + + _schema = { + 'definitions': {}, + 'properties': { + 'events': { + 'description': 'Log items list', + 'items': {'type': 'object'}, + 'type': ['array', 'null'], + }, + 'returned': { + 'description': 'Number of results returned', + 'type': ['integer', 'null'], + }, + 'scroll_id': { + 'description': 'Scroll ID for getting more results', + 'type': ['string', 'null'], + }, + 'total': { + 'description': 'Total number of results available for this query', + 'type': ['number', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, events=None, returned=None, total=None, scroll_id=None, **kwargs): + super(GetTaskLogResponse, self).__init__(**kwargs) + self.events = events + self.returned = returned + self.total = total + self.scroll_id = scroll_id + + @schema_property('events') + def events(self): + return self._property_events + + @events.setter + def events(self, value): + if value is None: + self._property_events = None + return + + self.assert_isinstance(value, "events", (list, tuple)) + + self.assert_isinstance(value, "events", (dict,), is_array=True) + self._property_events = value + + @schema_property('returned') + def returned(self): + return self._property_returned + + @returned.setter + def returned(self, value): + if value is None: + self._property_returned = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "returned", six.integer_types) + self._property_returned = value + + @schema_property('total') + def total(self): + return self._property_total + + @total.setter + def total(self, value): + if value is None: + self._property_total = None + return + + self.assert_isinstance(value, "total", six.integer_types + (float,)) + self._property_total = value + + @schema_property('scroll_id') + def scroll_id(self): + return self._property_scroll_id + + @scroll_id.setter + def scroll_id(self, value): + if value is None: + self._property_scroll_id = None + return + + self.assert_isinstance(value, "scroll_id", six.string_types) + self._property_scroll_id = value + + +class GetTaskPlotsRequest(Request): + """ + Get all 'plot' events for this task + + :param task: Task ID + :type task: str + :param iters: Max number of latest iterations for which to return debug images + :type iters: int + :param scroll_id: Scroll ID of previous call (used for getting more results) + :type scroll_id: str + """ + + _service = "events" + _action = "get_task_plots" + _version = "1.5" + _schema = { + 
'definitions': {}, + 'properties': { + 'iters': { + 'description': 'Max number of latest iterations for which to return debug images', + 'type': 'integer', + }, + 'scroll_id': { + 'description': 'Scroll ID of previous call (used for getting more results)', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, iters=None, scroll_id=None, **kwargs): + super(GetTaskPlotsRequest, self).__init__(**kwargs) + self.task = task + self.iters = iters + self.scroll_id = scroll_id + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('iters') + def iters(self): + return self._property_iters + + @iters.setter + def iters(self, value): + if value is None: + self._property_iters = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "iters", six.integer_types) + self._property_iters = value + + @schema_property('scroll_id') + def scroll_id(self): + return self._property_scroll_id + + @scroll_id.setter + def scroll_id(self, value): + if value is None: + self._property_scroll_id = None + return + + self.assert_isinstance(value, "scroll_id", six.string_types) + self._property_scroll_id = value + + +class GetTaskPlotsResponse(Response): + """ + Response of events.get_task_plots endpoint. + + :param plots: Plots list + :type plots: Sequence[dict] + :param returned: Number of results returned + :type returned: int + :param total: Total number of results available for this query + :type total: float + :param scroll_id: Scroll ID for getting more results + :type scroll_id: str + """ + _service = "events" + _action = "get_task_plots" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'plots': { + 'description': 'Plots list', + 'items': {'type': 'object'}, + 'type': ['array', 'null'], + }, + 'returned': { + 'description': 'Number of results returned', + 'type': ['integer', 'null'], + }, + 'scroll_id': { + 'description': 'Scroll ID for getting more results', + 'type': ['string', 'null'], + }, + 'total': { + 'description': 'Total number of results available for this query', + 'type': ['number', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, plots=None, returned=None, total=None, scroll_id=None, **kwargs): + super(GetTaskPlotsResponse, self).__init__(**kwargs) + self.plots = plots + self.returned = returned + self.total = total + self.scroll_id = scroll_id + + @schema_property('plots') + def plots(self): + return self._property_plots + + @plots.setter + def plots(self, value): + if value is None: + self._property_plots = None + return + + self.assert_isinstance(value, "plots", (list, tuple)) + + self.assert_isinstance(value, "plots", (dict,), is_array=True) + self._property_plots = value + + @schema_property('returned') + def returned(self): + return self._property_returned + + @returned.setter + def returned(self, value): + if value is None: + self._property_returned = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "returned", six.integer_types) + self._property_returned = value + + @schema_property('total') + def total(self): + return self._property_total + + @total.setter + def total(self, 
value): + if value is None: + self._property_total = None + return + + self.assert_isinstance(value, "total", six.integer_types + (float,)) + self._property_total = value + + @schema_property('scroll_id') + def scroll_id(self): + return self._property_scroll_id + + @scroll_id.setter + def scroll_id(self, value): + if value is None: + self._property_scroll_id = None + return + + self.assert_isinstance(value, "scroll_id", six.string_types) + self._property_scroll_id = value + + +class GetVectorMetricsAndVariantsRequest(Request): + """ + :param task: Task ID + :type task: str + """ + + _service = "events" + _action = "get_vector_metrics_and_variants" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': {'task': {'description': 'Task ID', 'type': 'string'}}, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, **kwargs): + super(GetVectorMetricsAndVariantsRequest, self).__init__(**kwargs) + self.task = task + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + +class GetVectorMetricsAndVariantsResponse(Response): + """ + Response of events.get_vector_metrics_and_variants endpoint. + + :param metrics: + :type metrics: Sequence[dict] + """ + _service = "events" + _action = "get_vector_metrics_and_variants" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'metrics': { + 'description': '', + 'items': {'type': 'object'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, metrics=None, **kwargs): + super(GetVectorMetricsAndVariantsResponse, self).__init__(**kwargs) + self.metrics = metrics + + @schema_property('metrics') + def metrics(self): + return self._property_metrics + + @metrics.setter + def metrics(self, value): + if value is None: + self._property_metrics = None + return + + self.assert_isinstance(value, "metrics", (list, tuple)) + + self.assert_isinstance(value, "metrics", (dict,), is_array=True) + self._property_metrics = value + + +class MultiTaskScalarMetricsIterHistogramRequest(Request): + """ + Used to compare scalar stats histogram of multiple tasks + + :param tasks: List of task Task IDs + :type tasks: Sequence[str] + """ + + _service = "events" + _action = "multi_task_scalar_metrics_iter_histogram" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'tasks': { + 'description': 'List of task Task IDs', + 'items': { + 'description': 'List of task Task IDs', + 'type': 'string', + }, + 'type': 'array', + }, + }, + 'required': ['tasks'], + 'type': 'object', + } + def __init__( + self, tasks, **kwargs): + super(MultiTaskScalarMetricsIterHistogramRequest, self).__init__(**kwargs) + self.tasks = tasks + + @schema_property('tasks') + def tasks(self): + return self._property_tasks + + @tasks.setter + def tasks(self, value): + if value is None: + self._property_tasks = None + return + + self.assert_isinstance(value, "tasks", (list, tuple)) + + self.assert_isinstance(value, "tasks", six.string_types, is_array=True) + self._property_tasks = value + + +class MultiTaskScalarMetricsIterHistogramResponse(Response): + """ + Response of events.multi_task_scalar_metrics_iter_histogram endpoint. 
+ + """ + _service = "events" + _action = "multi_task_scalar_metrics_iter_histogram" + _version = "1.5" + + _schema = {'additionalProperties': True, 'definitions': {}, 'type': 'object'} + + +class ScalarMetricsIterHistogramRequest(Request): + """ + Get histogram data of all the vector metrics and variants in the task + + :param task: Task ID + :type task: str + """ + + _service = "events" + _action = "scalar_metrics_iter_histogram" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': {'task': {'description': 'Task ID', 'type': 'string'}}, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, **kwargs): + super(ScalarMetricsIterHistogramRequest, self).__init__(**kwargs) + self.task = task + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + +class ScalarMetricsIterHistogramResponse(Response): + """ + Response of events.scalar_metrics_iter_histogram endpoint. + + :param images: + :type images: Sequence[dict] + """ + _service = "events" + _action = "scalar_metrics_iter_histogram" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'images': {'items': {'type': 'object'}, 'type': ['array', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, images=None, **kwargs): + super(ScalarMetricsIterHistogramResponse, self).__init__(**kwargs) + self.images = images + + @schema_property('images') + def images(self): + return self._property_images + + @images.setter + def images(self, value): + if value is None: + self._property_images = None + return + + self.assert_isinstance(value, "images", (list, tuple)) + + self.assert_isinstance(value, "images", (dict,), is_array=True) + self._property_images = value + + +class VectorMetricsIterHistogramRequest(Request): + """ + Get histogram data of all the scalar metrics and variants in the task + + :param task: Task ID + :type task: str + :param metric: + :type metric: str + :param variant: + :type variant: str + """ + + _service = "events" + _action = "vector_metrics_iter_histogram" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'metric': {'description': '', 'type': 'string'}, + 'task': {'description': 'Task ID', 'type': 'string'}, + 'variant': {'description': '', 'type': 'string'}, + }, + 'required': ['task', 'metric', 'variant'], + 'type': 'object', + } + def __init__( + self, task, metric, variant, **kwargs): + super(VectorMetricsIterHistogramRequest, self).__init__(**kwargs) + self.task = task + self.metric = metric + self.variant = variant + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('metric') + def metric(self): + return self._property_metric + + @metric.setter + def metric(self, value): + if value is None: + self._property_metric = None + return + + self.assert_isinstance(value, "metric", six.string_types) + self._property_metric = value + + @schema_property('variant') + def variant(self): + return self._property_variant + + @variant.setter + def variant(self, value): + if value is None: + self._property_variant = None + return + + self.assert_isinstance(value, "variant", six.string_types) + 
self._property_variant = value + + +class VectorMetricsIterHistogramResponse(Response): + """ + Response of events.vector_metrics_iter_histogram endpoint. + + :param images: + :type images: Sequence[dict] + """ + _service = "events" + _action = "vector_metrics_iter_histogram" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'images': {'items': {'type': 'object'}, 'type': ['array', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, images=None, **kwargs): + super(VectorMetricsIterHistogramResponse, self).__init__(**kwargs) + self.images = images + + @schema_property('images') + def images(self): + return self._property_images + + @images.setter + def images(self, value): + if value is None: + self._property_images = None + return + + self.assert_isinstance(value, "images", (list, tuple)) + + self.assert_isinstance(value, "images", (dict,), is_array=True) + self._property_images = value + + +response_mapping = { + AddRequest: AddResponse, + AddBatchRequest: AddBatchResponse, + DeleteForTaskRequest: DeleteForTaskResponse, + DebugImagesRequest: DebugImagesResponse, + GetTaskLogRequest: GetTaskLogResponse, + GetTaskEventsRequest: GetTaskEventsResponse, + DownloadTaskLogRequest: DownloadTaskLogResponse, + GetTaskPlotsRequest: GetTaskPlotsResponse, + GetMultiTaskPlotsRequest: GetMultiTaskPlotsResponse, + GetVectorMetricsAndVariantsRequest: GetVectorMetricsAndVariantsResponse, + VectorMetricsIterHistogramRequest: VectorMetricsIterHistogramResponse, + ScalarMetricsIterHistogramRequest: ScalarMetricsIterHistogramResponse, + MultiTaskScalarMetricsIterHistogramRequest: MultiTaskScalarMetricsIterHistogramResponse, + GetTaskLatestScalarValuesRequest: GetTaskLatestScalarValuesResponse, + GetScalarMetricsAndVariantsRequest: GetScalarMetricsAndVariantsResponse, + GetScalarMetricDataRequest: GetScalarMetricDataResponse, +} diff --git a/trains/backend_api/services/v2_1/models.py b/trains/backend_api/services/v2_1/models.py new file mode 100644 index 00000000..fb1479f6 --- /dev/null +++ b/trains/backend_api/services/v2_1/models.py @@ -0,0 +1,2675 @@ +""" +models service + +This service provides a management interface for models (results of training tasks) stored in the system. 
+""" +import six +import types +from datetime import datetime +import enum + +from dateutil.parser import parse as parse_datetime + +from ....backend_api.session import Request, BatchRequest, Response, DataModel, NonStrictDataModel, CompoundRequest, schema_property, StringEnum + + +class MultiFieldPatternData(NonStrictDataModel): + """ + :param pattern: Pattern string (regex) + :type pattern: str + :param fields: List of field names + :type fields: Sequence[str] + """ + _schema = { + 'properties': { + 'fields': { + 'description': 'List of field names', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'pattern': { + 'description': 'Pattern string (regex)', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, pattern=None, fields=None, **kwargs): + super(MultiFieldPatternData, self).__init__(**kwargs) + self.pattern = pattern + self.fields = fields + + @schema_property('pattern') + def pattern(self): + return self._property_pattern + + @pattern.setter + def pattern(self, value): + if value is None: + self._property_pattern = None + return + + self.assert_isinstance(value, "pattern", six.string_types) + self._property_pattern = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (list, tuple)) + + self.assert_isinstance(value, "fields", six.string_types, is_array=True) + self._property_fields = value + + +class Model(NonStrictDataModel): + """ + :param id: Model id + :type id: str + :param name: Model name + :type name: str + :param user: Associated user id + :type user: str + :param company: Company id + :type company: str + :param created: Model creation time + :type created: datetime.datetime + :param task: Task ID of task in which the model was created + :type task: str + :param parent: Parent model ID + :type parent: str + :param project: Associated project ID + :type project: str + :param comment: Model comment + :type comment: str + :param tags: Tags + :type tags: Sequence[str] + :param framework: Framework on which the model is based. Should be identical to + the framework of the task which created the model + :type framework: str + :param design: Json object representing the model design. Should be identical + to the network design of the task which created the model + :type design: dict + :param labels: Json object representing the ids of the labels in the model. The + keys are the layers' names and the values are the ids. + :type labels: dict + :param uri: URI for the model, pointing to the destination storage. + :type uri: str + :param ready: Indication if the model is final and can be used by other tasks + :type ready: bool + :param ui_cache: UI cache for this model + :type ui_cache: dict + """ + _schema = { + 'properties': { + 'comment': {'description': 'Model comment', 'type': ['string', 'null']}, + 'company': {'description': 'Company id', 'type': ['string', 'null']}, + 'created': { + 'description': 'Model creation time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'design': { + 'additionalProperties': True, + 'description': 'Json object representing the model design. Should be identical to the network design of the task which created the model', + 'type': ['object', 'null'], + }, + 'framework': { + 'description': 'Framework on which the model is based. 
Should be identical to the framework of the task which created the model', + 'type': ['string', 'null'], + }, + 'id': {'description': 'Model id', 'type': ['string', 'null']}, + 'labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.", + 'type': ['object', 'null'], + }, + 'name': {'description': 'Model name', 'type': ['string', 'null']}, + 'parent': { + 'description': 'Parent model ID', + 'type': ['string', 'null'], + }, + 'project': { + 'description': 'Associated project ID', + 'type': ['string', 'null'], + }, + 'ready': { + 'description': 'Indication if the model is final and can be used by other tasks', + 'type': ['boolean', 'null'], + }, + 'tags': { + 'description': 'Tags', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'task': { + 'description': 'Task ID of task in which the model was created', + 'type': ['string', 'null'], + }, + 'ui_cache': { + 'additionalProperties': True, + 'description': 'UI cache for this model', + 'type': ['object', 'null'], + }, + 'uri': { + 'description': 'URI for the model, pointing to the destination storage.', + 'type': ['string', 'null'], + }, + 'user': { + 'description': 'Associated user id', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, id=None, name=None, user=None, company=None, created=None, task=None, parent=None, project=None, comment=None, tags=None, framework=None, design=None, labels=None, uri=None, ready=None, ui_cache=None, **kwargs): + super(Model, self).__init__(**kwargs) + self.id = id + self.name = name + self.user = user + self.company = company + self.created = created + self.task = task + self.parent = parent + self.project = project + self.comment = comment + self.tags = tags + self.framework = framework + self.design = design + self.labels = labels + self.uri = uri + self.ready = ready + self.ui_cache = ui_cache + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('user') + def user(self): + return self._property_user + + @user.setter + def user(self, value): + if value is None: + self._property_user = None + return + + self.assert_isinstance(value, "user", six.string_types) + self._property_user = value + + @schema_property('company') + def company(self): + return self._property_company + + @company.setter + def company(self, value): + if value is None: + self._property_company = None + return + + self.assert_isinstance(value, "company", six.string_types) + self._property_company = value + + @schema_property('created') + def created(self): + return self._property_created + + @created.setter + def created(self, value): + if value is None: + self._property_created = None + return + + self.assert_isinstance(value, "created", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_created = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def 
task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('parent') + def parent(self): + return self._property_parent + + @parent.setter + def parent(self, value): + if value is None: + self._property_parent = None + return + + self.assert_isinstance(value, "parent", six.string_types) + self._property_parent = value + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + @schema_property('comment') + def comment(self): + return self._property_comment + + @comment.setter + def comment(self, value): + if value is None: + self._property_comment = None + return + + self.assert_isinstance(value, "comment", six.string_types) + self._property_comment = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('framework') + def framework(self): + return self._property_framework + + @framework.setter + def framework(self, value): + if value is None: + self._property_framework = None + return + + self.assert_isinstance(value, "framework", six.string_types) + self._property_framework = value + + @schema_property('design') + def design(self): + return self._property_design + + @design.setter + def design(self, value): + if value is None: + self._property_design = None + return + + self.assert_isinstance(value, "design", (dict,)) + self._property_design = value + + @schema_property('labels') + def labels(self): + return self._property_labels + + @labels.setter + def labels(self, value): + if value is None: + self._property_labels = None + return + + self.assert_isinstance(value, "labels", (dict,)) + self._property_labels = value + + @schema_property('uri') + def uri(self): + return self._property_uri + + @uri.setter + def uri(self, value): + if value is None: + self._property_uri = None + return + + self.assert_isinstance(value, "uri", six.string_types) + self._property_uri = value + + @schema_property('ready') + def ready(self): + return self._property_ready + + @ready.setter + def ready(self, value): + if value is None: + self._property_ready = None + return + + self.assert_isinstance(value, "ready", (bool,)) + self._property_ready = value + + @schema_property('ui_cache') + def ui_cache(self): + return self._property_ui_cache + + @ui_cache.setter + def ui_cache(self, value): + if value is None: + self._property_ui_cache = None + return + + self.assert_isinstance(value, "ui_cache", (dict,)) + self._property_ui_cache = value + + +class CreateRequest(Request): + """ + Create a new model not associated with a task + + :param uri: URI for the model + :type uri: str + :param name: Model name Unique within the company. + :type name: str + :param comment: Model comment + :type comment: str + :param tags: Tags list + :type tags: Sequence[str] + :param framework: Framework on which the model is based. Case insensitive. + Should be identical to the framework of the task which created the model. 
+ :type framework: str + :param design: Json[d] object representing the model design. Should be + identical to the network design of the task which created the model + :type design: dict + :param labels: Json object + :type labels: dict + :param ready: Indication if the model is final and can be used by other tasks + Default is false. + :type ready: bool + :param public: Create a public model Default is false. + :type public: bool + :param project: Project to which to model belongs + :type project: str + :param parent: Parent model + :type parent: str + :param task: Associated task ID + :type task: str + """ + + _service = "models" + _action = "create" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'comment': {'description': 'Model comment', 'type': 'string'}, + 'design': { + 'additionalProperties': True, + 'description': 'Json[d] object representing the model design. Should be identical to the network design of the task which created the model', + 'type': 'object', + }, + 'framework': { + 'description': 'Framework on which the model is based. Case insensitive. Should be identical to the framework of the task which created the model.', + 'type': 'string', + }, + 'labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': 'Json object', + 'type': 'object', + }, + 'name': { + 'description': 'Model name Unique within the company.', + 'type': 'string', + }, + 'parent': {'description': 'Parent model', 'type': 'string'}, + 'project': { + 'description': 'Project to which to model belongs', + 'type': 'string', + }, + 'public': { + 'default': False, + 'description': 'Create a public model Default is false.', + 'type': 'boolean', + }, + 'ready': { + 'default': False, + 'description': 'Indication if the model is final and can be used by other tasks Default is false.', + 'type': 'boolean', + }, + 'tags': {'description': 'Tags list', 'items': {'type': 'string'}, 'type': 'array'}, + 'task': {'description': 'Associated task ID', 'type': 'string'}, + 'uri': {'description': 'URI for the model', 'type': 'string'}, + }, + 'required': ['uri', 'name', 'labels'], + 'type': 'object', + } + def __init__( + self, uri, name, labels, comment=None, tags=None, framework=None, design=None, ready=False, public=False, project=None, parent=None, task=None, **kwargs): + super(CreateRequest, self).__init__(**kwargs) + self.uri = uri + self.name = name + self.comment = comment + self.tags = tags + self.framework = framework + self.design = design + self.labels = labels + self.ready = ready + self.public = public + self.project = project + self.parent = parent + self.task = task + + @schema_property('uri') + def uri(self): + return self._property_uri + + @uri.setter + def uri(self, value): + if value is None: + self._property_uri = None + return + + self.assert_isinstance(value, "uri", six.string_types) + self._property_uri = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('comment') + def comment(self): + return self._property_comment + + @comment.setter + def comment(self, value): + if value is None: + self._property_comment = None + return + + self.assert_isinstance(value, "comment", six.string_types) + self._property_comment = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def 
tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('framework') + def framework(self): + return self._property_framework + + @framework.setter + def framework(self, value): + if value is None: + self._property_framework = None + return + + self.assert_isinstance(value, "framework", six.string_types) + self._property_framework = value + + @schema_property('design') + def design(self): + return self._property_design + + @design.setter + def design(self, value): + if value is None: + self._property_design = None + return + + self.assert_isinstance(value, "design", (dict,)) + self._property_design = value + + @schema_property('labels') + def labels(self): + return self._property_labels + + @labels.setter + def labels(self, value): + if value is None: + self._property_labels = None + return + + self.assert_isinstance(value, "labels", (dict,)) + self._property_labels = value + + @schema_property('ready') + def ready(self): + return self._property_ready + + @ready.setter + def ready(self, value): + if value is None: + self._property_ready = None + return + + self.assert_isinstance(value, "ready", (bool,)) + self._property_ready = value + + @schema_property('public') + def public(self): + return self._property_public + + @public.setter + def public(self, value): + if value is None: + self._property_public = None + return + + self.assert_isinstance(value, "public", (bool,)) + self._property_public = value + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + @schema_property('parent') + def parent(self): + return self._property_parent + + @parent.setter + def parent(self, value): + if value is None: + self._property_parent = None + return + + self.assert_isinstance(value, "parent", six.string_types) + self._property_parent = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + +class CreateResponse(Response): + """ + Response of models.create endpoint. 
+ + :param id: ID of the model + :type id: str + :param created: Was the model created + :type created: bool + """ + _service = "models" + _action = "create" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'created': { + 'description': 'Was the model created', + 'type': ['boolean', 'null'], + }, + 'id': {'description': 'ID of the model', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, id=None, created=None, **kwargs): + super(CreateResponse, self).__init__(**kwargs) + self.id = id + self.created = created + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + @schema_property('created') + def created(self): + return self._property_created + + @created.setter + def created(self, value): + if value is None: + self._property_created = None + return + + self.assert_isinstance(value, "created", (bool,)) + self._property_created = value + + +class DeleteRequest(Request): + """ + Delete a model. + + :param model: Model ID + :type model: str + :param force: Force. Required if there are tasks that use the model as an + execution model, or if the model's creating task is published. + :type force: bool + """ + + _service = "models" + _action = "delete" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'force': { + 'description': "Force. Required if there are tasks that use the model as an execution model, or if the model's creating task is published.\n ", + 'type': 'boolean', + }, + 'model': {'description': 'Model ID', 'type': 'string'}, + }, + 'required': ['model'], + 'type': 'object', + } + def __init__( + self, model, force=None, **kwargs): + super(DeleteRequest, self).__init__(**kwargs) + self.model = model + self.force = force + + @schema_property('model') + def model(self): + return self._property_model + + @model.setter + def model(self, value): + if value is None: + self._property_model = None + return + + self.assert_isinstance(value, "model", six.string_types) + self._property_model = value + + @schema_property('force') + def force(self): + return self._property_force + + @force.setter + def force(self, value): + if value is None: + self._property_force = None + return + + self.assert_isinstance(value, "force", (bool,)) + self._property_force = value + + +class DeleteResponse(Response): + """ + Response of models.delete endpoint. + + :param deleted: Indicates whether the model was deleted + :type deleted: bool + """ + _service = "models" + _action = "delete" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'deleted': { + 'description': 'Indicates whether the model was deleted', + 'type': ['boolean', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, deleted=None, **kwargs): + super(DeleteResponse, self).__init__(**kwargs) + self.deleted = deleted + + @schema_property('deleted') + def deleted(self): + return self._property_deleted + + @deleted.setter + def deleted(self, value): + if value is None: + self._property_deleted = None + return + + self.assert_isinstance(value, "deleted", (bool,)) + self._property_deleted = value + + +class EditRequest(Request): + """ + Edit an existing model + + :param model: Model ID + :type model: str + :param uri: URI for the model + :type uri: str + :param name: Model name Unique within the company. 
+ :type name: str + :param comment: Model comment + :type comment: str + :param tags: Tags list + :type tags: Sequence[str] + :param framework: Framework on which the model is based. Case insensitive. + Should be identical to the framework of the task which created the model. + :type framework: str + :param design: Json[d] object representing the model design. Should be + identical to the network design of the task which created the model + :type design: dict + :param labels: Json object + :type labels: dict + :param ready: Indication if the model is final and can be used by other tasks + :type ready: bool + :param project: Project to which to model belongs + :type project: str + :param parent: Parent model + :type parent: str + :param task: Associated task ID + :type task: str + :param iteration: Iteration (used to update task statistics) + :type iteration: int + """ + + _service = "models" + _action = "edit" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'comment': {'description': 'Model comment', 'type': 'string'}, + 'design': { + 'additionalProperties': True, + 'description': 'Json[d] object representing the model design. Should be identical to the network design of the task which created the model', + 'type': 'object', + }, + 'framework': { + 'description': 'Framework on which the model is based. Case insensitive. Should be identical to the framework of the task which created the model.', + 'type': 'string', + }, + 'iteration': { + 'description': 'Iteration (used to update task statistics)', + 'type': 'integer', + }, + 'labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': 'Json object', + 'type': 'object', + }, + 'model': {'description': 'Model ID', 'type': 'string'}, + 'name': { + 'description': 'Model name Unique within the company.', + 'type': 'string', + }, + 'parent': {'description': 'Parent model', 'type': 'string'}, + 'project': { + 'description': 'Project to which to model belongs', + 'type': 'string', + }, + 'ready': { + 'description': 'Indication if the model is final and can be used by other tasks', + 'type': 'boolean', + }, + 'tags': {'description': 'Tags list', 'items': {'type': 'string'}, 'type': 'array'}, + 'task': {'description': 'Associated task ID', 'type': 'string'}, + 'uri': {'description': 'URI for the model', 'type': 'string'}, + }, + 'required': ['model'], + 'type': 'object', + } + def __init__( + self, model, uri=None, name=None, comment=None, tags=None, framework=None, design=None, labels=None, ready=None, project=None, parent=None, task=None, iteration=None, **kwargs): + super(EditRequest, self).__init__(**kwargs) + self.model = model + self.uri = uri + self.name = name + self.comment = comment + self.tags = tags + self.framework = framework + self.design = design + self.labels = labels + self.ready = ready + self.project = project + self.parent = parent + self.task = task + self.iteration = iteration + + @schema_property('model') + def model(self): + return self._property_model + + @model.setter + def model(self, value): + if value is None: + self._property_model = None + return + + self.assert_isinstance(value, "model", six.string_types) + self._property_model = value + + @schema_property('uri') + def uri(self): + return self._property_uri + + @uri.setter + def uri(self, value): + if value is None: + self._property_uri = None + return + + self.assert_isinstance(value, "uri", six.string_types) + self._property_uri = value + + @schema_property('name') + def name(self): + return self._property_name + + 
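# --- Usage sketch (illustrative addition, not part of the generated schema code) ---
# Shows how the models.create request defined above can be populated. Only the
# CreateRequest constructor visible in this module is relied on; the Session
# object and its send() call are assumptions based on the session package added
# elsewhere in this patch, and the URI/label values below are hypothetical.
from trains.backend_api.services.v2_1 import models

create_req = models.CreateRequest(
    uri="s3://my-bucket/models/model.pkl",   # hypothetical destination URI (required)
    name="example model",                    # required, unique within the company
    labels={"background": 0, "object": 1},   # required: layer name -> label id
    framework="tensorflow",
    tags=["example"],
    ready=False,
)
# Assumed dispatch pattern (hedged): a configured Session sends the request and
# the matching CreateResponse (see response_mapping at the end of this module)
# carries the new model id.
#   from trains.backend_api.session import Session
#   result = Session().send(create_req)
#   model_id = result.response.id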
@name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('comment') + def comment(self): + return self._property_comment + + @comment.setter + def comment(self, value): + if value is None: + self._property_comment = None + return + + self.assert_isinstance(value, "comment", six.string_types) + self._property_comment = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('framework') + def framework(self): + return self._property_framework + + @framework.setter + def framework(self, value): + if value is None: + self._property_framework = None + return + + self.assert_isinstance(value, "framework", six.string_types) + self._property_framework = value + + @schema_property('design') + def design(self): + return self._property_design + + @design.setter + def design(self, value): + if value is None: + self._property_design = None + return + + self.assert_isinstance(value, "design", (dict,)) + self._property_design = value + + @schema_property('labels') + def labels(self): + return self._property_labels + + @labels.setter + def labels(self, value): + if value is None: + self._property_labels = None + return + + self.assert_isinstance(value, "labels", (dict,)) + self._property_labels = value + + @schema_property('ready') + def ready(self): + return self._property_ready + + @ready.setter + def ready(self, value): + if value is None: + self._property_ready = None + return + + self.assert_isinstance(value, "ready", (bool,)) + self._property_ready = value + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + @schema_property('parent') + def parent(self): + return self._property_parent + + @parent.setter + def parent(self, value): + if value is None: + self._property_parent = None + return + + self.assert_isinstance(value, "parent", six.string_types) + self._property_parent = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('iteration') + def iteration(self): + return self._property_iteration + + @iteration.setter + def iteration(self, value): + if value is None: + self._property_iteration = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "iteration", six.integer_types) + self._property_iteration = value + + +class EditResponse(Response): + """ + Response of models.edit endpoint. 
+ + :param updated: Number of models updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "models" + _action = "edit" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of models updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, fields=None, **kwargs): + super(EditResponse, self).__init__(**kwargs) + self.updated = updated + self.fields = fields + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class GetAllRequest(Request): + """ + Get all models + + :param name: Get only models whose name matches this pattern (python regular + expression syntax) + :type name: str + :param ready: Indication whether to retrieve only models that are marked ready + If not supplied returns both ready and not-ready projects. + :type ready: bool + :param tags: Tags list used to filter results. Prepend '-' to tag name to + indicate exclusion + :type tags: Sequence[str] + :param only_fields: List of model field names (if applicable, nesting is + supported using '.'). If provided, this list defines the query's projection + (only these fields will be returned for each result entry) + :type only_fields: Sequence[str] + :param page: Page number, returns a specific page out of the resulting list of + models + :type page: int + :param page_size: Page size, specifies the number of results returned in each + page (last page may contain fewer results) + :type page_size: int + :param project: List of associated project IDs + :type project: Sequence[str] + :param order_by: List of field names to order by. When search_text is used, + '@text_score' can be used as a field representing the text score of returned + documents. Use '-' prefix to specify descending order. 
Optional, recommended + when using page + :type order_by: Sequence[str] + :param task: List of associated task IDs + :type task: Sequence[str] + :param id: List of model IDs + :type id: Sequence[str] + :param search_text: Free text search query + :type search_text: str + :param framework: List of frameworks + :type framework: Sequence[str] + :param uri: List of model URIs + :type uri: Sequence[str] + :param _all_: Multi-field pattern condition (all fields match pattern) + :type _all_: MultiFieldPatternData + :param _any_: Multi-field pattern condition (any field matches pattern) + :type _any_: MultiFieldPatternData + """ + + _service = "models" + _action = "get_all" + _version = "1.5" + _schema = { + 'definitions': { + 'multi_field_pattern_data': { + 'properties': { + 'fields': { + 'description': 'List of field names', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'pattern': { + 'description': 'Pattern string (regex)', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'dependencies': {'page': ['page_size']}, + 'properties': { + '_all_': { + 'description': 'Multi-field pattern condition (all fields match pattern)', + 'oneOf': [ + {'$ref': '#/definitions/multi_field_pattern_data'}, + {'type': 'null'}, + ], + }, + '_any_': { + 'description': 'Multi-field pattern condition (any field matches pattern)', + 'oneOf': [ + {'$ref': '#/definitions/multi_field_pattern_data'}, + {'type': 'null'}, + ], + }, + 'framework': { + 'description': 'List of frameworks', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'id': { + 'description': 'List of model IDs', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'name': { + 'description': 'Get only models whose name matches this pattern (python regular expression syntax)', + 'type': ['string', 'null'], + }, + 'only_fields': { + 'description': "List of model field names (if applicable, nesting is supported using '.'). If provided, this list defines the query's projection (only these fields will be returned for each result entry)", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'order_by': { + 'description': "List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. Optional, recommended when using page", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'page': { + 'description': 'Page number, returns a specific page out of the resulting list of models', + 'minimum': 0, + 'type': ['integer', 'null'], + }, + 'page_size': { + 'description': 'Page size, specifies the number of results returned in each page (last page may contain fewer results)', + 'minimum': 1, + 'type': ['integer', 'null'], + }, + 'project': { + 'description': 'List of associated project IDs', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'ready': { + 'description': 'Indication whether to retrieve only models that are marked ready If not supplied returns both ready and not-ready projects.', + 'type': ['boolean', 'null'], + }, + 'search_text': { + 'description': 'Free text search query', + 'type': ['string', 'null'], + }, + 'tags': { + 'description': "Tags list used to filter results. 
Prepend '-' to tag name to indicate exclusion", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'task': { + 'description': 'List of associated task IDs', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'uri': { + 'description': 'List of model URIs', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, name=None, ready=None, tags=None, only_fields=None, page=None, page_size=None, project=None, order_by=None, task=None, id=None, search_text=None, framework=None, uri=None, _all_=None, _any_=None, **kwargs): + super(GetAllRequest, self).__init__(**kwargs) + self.name = name + self.ready = ready + self.tags = tags + self.only_fields = only_fields + self.page = page + self.page_size = page_size + self.project = project + self.order_by = order_by + self.task = task + self.id = id + self.search_text = search_text + self.framework = framework + self.uri = uri + self._all_ = _all_ + self._any_ = _any_ + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('ready') + def ready(self): + return self._property_ready + + @ready.setter + def ready(self, value): + if value is None: + self._property_ready = None + return + + self.assert_isinstance(value, "ready", (bool,)) + self._property_ready = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('only_fields') + def only_fields(self): + return self._property_only_fields + + @only_fields.setter + def only_fields(self, value): + if value is None: + self._property_only_fields = None + return + + self.assert_isinstance(value, "only_fields", (list, tuple)) + + self.assert_isinstance(value, "only_fields", six.string_types, is_array=True) + self._property_only_fields = value + + @schema_property('page') + def page(self): + return self._property_page + + @page.setter + def page(self, value): + if value is None: + self._property_page = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "page", six.integer_types) + self._property_page = value + + @schema_property('page_size') + def page_size(self): + return self._property_page_size + + @page_size.setter + def page_size(self, value): + if value is None: + self._property_page_size = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "page_size", six.integer_types) + self._property_page_size = value + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", (list, tuple)) + + self.assert_isinstance(value, "project", six.string_types, is_array=True) + self._property_project = value + + @schema_property('order_by') + def order_by(self): + return self._property_order_by + + @order_by.setter + def order_by(self, value): + if value is None: + self._property_order_by = 
None + return + + self.assert_isinstance(value, "order_by", (list, tuple)) + + self.assert_isinstance(value, "order_by", six.string_types, is_array=True) + self._property_order_by = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", (list, tuple)) + + self.assert_isinstance(value, "task", six.string_types, is_array=True) + self._property_task = value + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", (list, tuple)) + + self.assert_isinstance(value, "id", six.string_types, is_array=True) + self._property_id = value + + @schema_property('search_text') + def search_text(self): + return self._property_search_text + + @search_text.setter + def search_text(self, value): + if value is None: + self._property_search_text = None + return + + self.assert_isinstance(value, "search_text", six.string_types) + self._property_search_text = value + + @schema_property('framework') + def framework(self): + return self._property_framework + + @framework.setter + def framework(self, value): + if value is None: + self._property_framework = None + return + + self.assert_isinstance(value, "framework", (list, tuple)) + + self.assert_isinstance(value, "framework", six.string_types, is_array=True) + self._property_framework = value + + @schema_property('uri') + def uri(self): + return self._property_uri + + @uri.setter + def uri(self, value): + if value is None: + self._property_uri = None + return + + self.assert_isinstance(value, "uri", (list, tuple)) + + self.assert_isinstance(value, "uri", six.string_types, is_array=True) + self._property_uri = value + + @schema_property('_all_') + def _all_(self): + return self._property__all_ + + @_all_.setter + def _all_(self, value): + if value is None: + self._property__all_ = None + return + if isinstance(value, dict): + value = MultiFieldPatternData.from_dict(value) + else: + self.assert_isinstance(value, "_all_", MultiFieldPatternData) + self._property__all_ = value + + @schema_property('_any_') + def _any_(self): + return self._property__any_ + + @_any_.setter + def _any_(self, value): + if value is None: + self._property__any_ = None + return + if isinstance(value, dict): + value = MultiFieldPatternData.from_dict(value) + else: + self.assert_isinstance(value, "_any_", MultiFieldPatternData) + self._property__any_ = value + + +class GetAllResponse(Response): + """ + Response of models.get_all endpoint. + + :param models: Models list + :type models: Sequence[Model] + """ + _service = "models" + _action = "get_all" + _version = "1.5" + + _schema = { + 'definitions': { + 'model': { + 'properties': { + 'comment': { + 'description': 'Model comment', + 'type': ['string', 'null'], + }, + 'company': { + 'description': 'Company id', + 'type': ['string', 'null'], + }, + 'created': { + 'description': 'Model creation time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'design': { + 'additionalProperties': True, + 'description': 'Json object representing the model design. Should be identical to the network design of the task which created the model', + 'type': ['object', 'null'], + }, + 'framework': { + 'description': 'Framework on which the model is based. 
Should be identical to the framework of the task which created the model', + 'type': ['string', 'null'], + }, + 'id': {'description': 'Model id', 'type': ['string', 'null']}, + 'labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.", + 'type': ['object', 'null'], + }, + 'name': { + 'description': 'Model name', + 'type': ['string', 'null'], + }, + 'parent': { + 'description': 'Parent model ID', + 'type': ['string', 'null'], + }, + 'project': { + 'description': 'Associated project ID', + 'type': ['string', 'null'], + }, + 'ready': { + 'description': 'Indication if the model is final and can be used by other tasks', + 'type': ['boolean', 'null'], + }, + 'tags': { + 'description': 'Tags', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'task': { + 'description': 'Task ID of task in which the model was created', + 'type': ['string', 'null'], + }, + 'ui_cache': { + 'additionalProperties': True, + 'description': 'UI cache for this model', + 'type': ['object', 'null'], + }, + 'uri': { + 'description': 'URI for the model, pointing to the destination storage.', + 'type': ['string', 'null'], + }, + 'user': { + 'description': 'Associated user id', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'models': { + 'description': 'Models list', + 'items': {'$ref': '#/definitions/model'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, models=None, **kwargs): + super(GetAllResponse, self).__init__(**kwargs) + self.models = models + + @schema_property('models') + def models(self): + return self._property_models + + @models.setter + def models(self, value): + if value is None: + self._property_models = None + return + + self.assert_isinstance(value, "models", (list, tuple)) + if any(isinstance(v, dict) for v in value): + value = [Model.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "models", Model, is_array=True) + self._property_models = value + + +class GetByIdRequest(Request): + """ + Gets model information + + :param model: Model id + :type model: str + """ + + _service = "models" + _action = "get_by_id" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': {'model': {'description': 'Model id', 'type': 'string'}}, + 'required': ['model'], + 'type': 'object', + } + def __init__( + self, model, **kwargs): + super(GetByIdRequest, self).__init__(**kwargs) + self.model = model + + @schema_property('model') + def model(self): + return self._property_model + + @model.setter + def model(self, value): + if value is None: + self._property_model = None + return + + self.assert_isinstance(value, "model", six.string_types) + self._property_model = value + + +class GetByIdResponse(Response): + """ + Response of models.get_by_id endpoint. + + :param model: Model info + :type model: Model + """ + _service = "models" + _action = "get_by_id" + _version = "1.5" + + _schema = { + 'definitions': { + 'model': { + 'properties': { + 'comment': { + 'description': 'Model comment', + 'type': ['string', 'null'], + }, + 'company': { + 'description': 'Company id', + 'type': ['string', 'null'], + }, + 'created': { + 'description': 'Model creation time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'design': { + 'additionalProperties': True, + 'description': 'Json object representing the model design. 
Should be identical to the network design of the task which created the model', + 'type': ['object', 'null'], + }, + 'framework': { + 'description': 'Framework on which the model is based. Should be identical to the framework of the task which created the model', + 'type': ['string', 'null'], + }, + 'id': {'description': 'Model id', 'type': ['string', 'null']}, + 'labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.", + 'type': ['object', 'null'], + }, + 'name': { + 'description': 'Model name', + 'type': ['string', 'null'], + }, + 'parent': { + 'description': 'Parent model ID', + 'type': ['string', 'null'], + }, + 'project': { + 'description': 'Associated project ID', + 'type': ['string', 'null'], + }, + 'ready': { + 'description': 'Indication if the model is final and can be used by other tasks', + 'type': ['boolean', 'null'], + }, + 'tags': { + 'description': 'Tags', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'task': { + 'description': 'Task ID of task in which the model was created', + 'type': ['string', 'null'], + }, + 'ui_cache': { + 'additionalProperties': True, + 'description': 'UI cache for this model', + 'type': ['object', 'null'], + }, + 'uri': { + 'description': 'URI for the model, pointing to the destination storage.', + 'type': ['string', 'null'], + }, + 'user': { + 'description': 'Associated user id', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'model': { + 'description': 'Model info', + 'oneOf': [{'$ref': '#/definitions/model'}, {'type': 'null'}], + }, + }, + 'type': 'object', + } + def __init__( + self, model=None, **kwargs): + super(GetByIdResponse, self).__init__(**kwargs) + self.model = model + + @schema_property('model') + def model(self): + return self._property_model + + @model.setter + def model(self, value): + if value is None: + self._property_model = None + return + if isinstance(value, dict): + value = Model.from_dict(value) + else: + self.assert_isinstance(value, "model", Model) + self._property_model = value + + +class GetByTaskIdRequest(Request): + """ + Gets model information + + :param task: Task id + :type task: str + """ + + _service = "models" + _action = "get_by_task_id" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'task': {'description': 'Task id', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, task=None, **kwargs): + super(GetByTaskIdRequest, self).__init__(**kwargs) + self.task = task + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + +class GetByTaskIdResponse(Response): + """ + Response of models.get_by_task_id endpoint. 
+ + :param model: Model info + :type model: Model + """ + _service = "models" + _action = "get_by_task_id" + _version = "1.5" + + _schema = { + 'definitions': { + 'model': { + 'properties': { + 'comment': { + 'description': 'Model comment', + 'type': ['string', 'null'], + }, + 'company': { + 'description': 'Company id', + 'type': ['string', 'null'], + }, + 'created': { + 'description': 'Model creation time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'design': { + 'additionalProperties': True, + 'description': 'Json object representing the model design. Should be identical to the network design of the task which created the model', + 'type': ['object', 'null'], + }, + 'framework': { + 'description': 'Framework on which the model is based. Should be identical to the framework of the task which created the model', + 'type': ['string', 'null'], + }, + 'id': {'description': 'Model id', 'type': ['string', 'null']}, + 'labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.", + 'type': ['object', 'null'], + }, + 'name': { + 'description': 'Model name', + 'type': ['string', 'null'], + }, + 'parent': { + 'description': 'Parent model ID', + 'type': ['string', 'null'], + }, + 'project': { + 'description': 'Associated project ID', + 'type': ['string', 'null'], + }, + 'ready': { + 'description': 'Indication if the model is final and can be used by other tasks', + 'type': ['boolean', 'null'], + }, + 'tags': { + 'description': 'Tags', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'task': { + 'description': 'Task ID of task in which the model was created', + 'type': ['string', 'null'], + }, + 'ui_cache': { + 'additionalProperties': True, + 'description': 'UI cache for this model', + 'type': ['object', 'null'], + }, + 'uri': { + 'description': 'URI for the model, pointing to the destination storage.', + 'type': ['string', 'null'], + }, + 'user': { + 'description': 'Associated user id', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'model': { + 'description': 'Model info', + 'oneOf': [{'$ref': '#/definitions/model'}, {'type': 'null'}], + }, + }, + 'type': 'object', + } + def __init__( + self, model=None, **kwargs): + super(GetByTaskIdResponse, self).__init__(**kwargs) + self.model = model + + @schema_property('model') + def model(self): + return self._property_model + + @model.setter + def model(self, value): + if value is None: + self._property_model = None + return + if isinstance(value, dict): + value = Model.from_dict(value) + else: + self.assert_isinstance(value, "model", Model) + self._property_model = value + + +class SetReadyRequest(Request): + """ + Set the model ready flag to True. If the model is an output model of a task then try to publish the task. + + :param model: Model id + :type model: str + :param force_publish_task: Publish the associated task (if exists) even if it + is not in the 'stopped' state. Optional, the default value is False. + :type force_publish_task: bool + :param publish_task: Indicates that the associated task (if exists) should be + published. Optional, the default value is True. 
+ :type publish_task: bool + """ + + _service = "models" + _action = "set_ready" + _version = "1.9" + _schema = { + 'definitions': {}, + 'properties': { + 'force_publish_task': { + 'description': "Publish the associated task (if exists) even if it is not in the 'stopped' state. Optional, the default value is False.", + 'type': 'boolean', + }, + 'model': {'description': 'Model id', 'type': 'string'}, + 'publish_task': { + 'description': 'Indicates that the associated task (if exists) should be published. Optional, the default value is True.', + 'type': 'boolean', + }, + }, + 'required': ['model'], + 'type': 'object', + } + def __init__( + self, model, force_publish_task=None, publish_task=None, **kwargs): + super(SetReadyRequest, self).__init__(**kwargs) + self.model = model + self.force_publish_task = force_publish_task + self.publish_task = publish_task + + @schema_property('model') + def model(self): + return self._property_model + + @model.setter + def model(self, value): + if value is None: + self._property_model = None + return + + self.assert_isinstance(value, "model", six.string_types) + self._property_model = value + + @schema_property('force_publish_task') + def force_publish_task(self): + return self._property_force_publish_task + + @force_publish_task.setter + def force_publish_task(self, value): + if value is None: + self._property_force_publish_task = None + return + + self.assert_isinstance(value, "force_publish_task", (bool,)) + self._property_force_publish_task = value + + @schema_property('publish_task') + def publish_task(self): + return self._property_publish_task + + @publish_task.setter + def publish_task(self, value): + if value is None: + self._property_publish_task = None + return + + self.assert_isinstance(value, "publish_task", (bool,)) + self._property_publish_task = value + + +class SetReadyResponse(Response): + """ + Response of models.set_ready endpoint. + + :param updated: Number of models updated (0 or 1) + :type updated: int + :param published_task: Result of publishing of the model's associated task (if + exists). Returned only if the task was published successfully as part of the + model publishing. + :type published_task: dict + """ + _service = "models" + _action = "set_ready" + _version = "1.9" + + _schema = { + 'definitions': {}, + 'properties': { + 'published_task': { + 'description': "Result of publishing of the model's associated task (if exists). 
Returned only if the task was published successfully as part of the model publishing.", + 'properties': { + 'data': { + 'description': 'Data returned from the task publishing operation.', + 'properties': { + 'committed_versions_results': { + 'description': 'Committed versions results', + 'items': { + 'additionalProperties': True, + 'type': 'object', + }, + 'type': 'array', + }, + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': 'object', + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': 'integer', + }, + }, + 'type': 'object', + }, + 'id': {'description': 'Task id', 'type': 'string'}, + }, + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of models updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, published_task=None, **kwargs): + super(SetReadyResponse, self).__init__(**kwargs) + self.updated = updated + self.published_task = published_task + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('published_task') + def published_task(self): + return self._property_published_task + + @published_task.setter + def published_task(self, value): + if value is None: + self._property_published_task = None + return + + self.assert_isinstance(value, "published_task", (dict,)) + self._property_published_task = value + + +class UpdateRequest(Request): + """ + Update a model + + :param model: Model id + :type model: str + :param name: Model name Unique within the company. + :type name: str + :param comment: Model comment + :type comment: str + :param tags: Tags list + :type tags: Sequence[str] + :param ready: Indication if the model is final and can be used by other tasks + Default is false. 
+ :type ready: bool + :param created: Model creation time (UTC) + :type created: datetime.datetime + :param ui_cache: UI cache for this model + :type ui_cache: dict + :param project: Project to which to model belongs + :type project: str + :param task: Associated task ID + :type task: str + :param iteration: Iteration (used to update task statistics if an associated + task is reported) + :type iteration: int + """ + + _service = "models" + _action = "update" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'comment': {'description': 'Model comment', 'type': 'string'}, + 'created': { + 'description': 'Model creation time (UTC) ', + 'format': 'date-time', + 'type': 'string', + }, + 'iteration': { + 'description': 'Iteration (used to update task statistics if an associated task is reported)', + 'type': 'integer', + }, + 'model': {'description': 'Model id', 'type': 'string'}, + 'name': { + 'description': 'Model name Unique within the company.', + 'type': 'string', + }, + 'project': { + 'description': 'Project to which to model belongs', + 'type': 'string', + }, + 'ready': { + 'default': False, + 'description': 'Indication if the model is final and can be used by other tasks Default is false.', + 'type': 'boolean', + }, + 'tags': {'description': 'Tags list', 'items': {'type': 'string'}, 'type': 'array'}, + 'task': {'description': 'Associated task ID', 'type': 'string'}, + 'ui_cache': { + 'additionalProperties': True, + 'description': 'UI cache for this model', + 'type': 'object', + }, + }, + 'required': ['model'], + 'type': 'object', + } + def __init__( + self, model, name=None, comment=None, tags=None, ready=False, created=None, ui_cache=None, project=None, task=None, iteration=None, **kwargs): + super(UpdateRequest, self).__init__(**kwargs) + self.model = model + self.name = name + self.comment = comment + self.tags = tags + self.ready = ready + self.created = created + self.ui_cache = ui_cache + self.project = project + self.task = task + self.iteration = iteration + + @schema_property('model') + def model(self): + return self._property_model + + @model.setter + def model(self, value): + if value is None: + self._property_model = None + return + + self.assert_isinstance(value, "model", six.string_types) + self._property_model = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('comment') + def comment(self): + return self._property_comment + + @comment.setter + def comment(self, value): + if value is None: + self._property_comment = None + return + + self.assert_isinstance(value, "comment", six.string_types) + self._property_comment = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('ready') + def ready(self): + return self._property_ready + + @ready.setter + def ready(self, value): + if value is None: + self._property_ready = None + return + + self.assert_isinstance(value, "ready", (bool,)) + self._property_ready = value + + @schema_property('created') + def created(self): + return self._property_created + + 
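# --- Usage sketch (illustrative addition) ---
# Querying models with the get_all request defined above, combining paging,
# ordering and the MultiFieldPatternData regex helper. Field names and the
# page/page_size dependency come from the schema in this module; the Session
# send() pattern is the same assumption noted in the earlier sketch, and the
# project id below is a placeholder.
from trains.backend_api.services.v2_1 import models

query = models.GetAllRequest(
    project=["<project-id>"],                # placeholder project id
    ready=True,                              # only models marked as final
    order_by=["-created"],                   # '-' prefix -> descending order
    page=0,
    page_size=50,                            # required whenever `page` is given
    only_fields=["id", "name", "uri"],
    _any_=models.MultiFieldPatternData(
        pattern="resnet",                    # regex matched against the listed fields
        fields=["name", "comment"],
    ),
)
# Assumed dispatch (hedged):
#   response = Session().send(query).response   # a GetAllResponse instance
#   for m in (response.models or []):
#       print(m.id, m.name, m.uri)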
@created.setter + def created(self, value): + if value is None: + self._property_created = None + return + + self.assert_isinstance(value, "created", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_created = value + + @schema_property('ui_cache') + def ui_cache(self): + return self._property_ui_cache + + @ui_cache.setter + def ui_cache(self, value): + if value is None: + self._property_ui_cache = None + return + + self.assert_isinstance(value, "ui_cache", (dict,)) + self._property_ui_cache = value + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('iteration') + def iteration(self): + return self._property_iteration + + @iteration.setter + def iteration(self, value): + if value is None: + self._property_iteration = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "iteration", six.integer_types) + self._property_iteration = value + + +class UpdateResponse(Response): + """ + Response of models.update endpoint. + + :param updated: Number of models updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "models" + _action = "update" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of models updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, fields=None, **kwargs): + super(UpdateResponse, self).__init__(**kwargs) + self.updated = updated + self.fields = fields + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class UpdateForTaskRequest(Request): + """ + Create or update a new model for a task + + :param task: Task id + :type task: str + :param uri: URI for the model + :type uri: str + :param name: Model name Unique within the company. + :type name: str + :param comment: Model comment + :type comment: str + :param tags: Tags list + :type tags: Sequence[str] + :param override_model_id: Override model ID. If provided, this model is updated + in the task. 
+ :type override_model_id: str + :param iteration: Iteration (used to update task statistics) + :type iteration: int + """ + + _service = "models" + _action = "update_for_task" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'comment': {'description': 'Model comment', 'type': 'string'}, + 'iteration': { + 'description': 'Iteration (used to update task statistics)', + 'type': 'integer', + }, + 'name': { + 'description': 'Model name Unique within the company.', + 'type': 'string', + }, + 'override_model_id': { + 'description': 'Override model ID. If provided, this model is updated in the task.', + 'type': 'string', + }, + 'tags': {'description': 'Tags list', 'items': {'type': 'string'}, 'type': 'array'}, + 'task': {'description': 'Task id', 'type': 'string'}, + 'uri': {'description': 'URI for the model', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, uri=None, name=None, comment=None, tags=None, override_model_id=None, iteration=None, **kwargs): + super(UpdateForTaskRequest, self).__init__(**kwargs) + self.task = task + self.uri = uri + self.name = name + self.comment = comment + self.tags = tags + self.override_model_id = override_model_id + self.iteration = iteration + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('uri') + def uri(self): + return self._property_uri + + @uri.setter + def uri(self, value): + if value is None: + self._property_uri = None + return + + self.assert_isinstance(value, "uri", six.string_types) + self._property_uri = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('comment') + def comment(self): + return self._property_comment + + @comment.setter + def comment(self, value): + if value is None: + self._property_comment = None + return + + self.assert_isinstance(value, "comment", six.string_types) + self._property_comment = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('override_model_id') + def override_model_id(self): + return self._property_override_model_id + + @override_model_id.setter + def override_model_id(self, value): + if value is None: + self._property_override_model_id = None + return + + self.assert_isinstance(value, "override_model_id", six.string_types) + self._property_override_model_id = value + + @schema_property('iteration') + def iteration(self): + return self._property_iteration + + @iteration.setter + def iteration(self, value): + if value is None: + self._property_iteration = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "iteration", six.integer_types) + self._property_iteration = value + + +class UpdateForTaskResponse(Response): + """ + Response of models.update_for_task endpoint. 
+ + :param id: ID of the model + :type id: str + :param created: Was the model created + :type created: bool + :param updated: Number of models updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "models" + _action = "update_for_task" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'created': { + 'description': 'Was the model created', + 'type': ['boolean', 'null'], + }, + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'id': {'description': 'ID of the model', 'type': ['string', 'null']}, + 'updated': { + 'description': 'Number of models updated (0 or 1)', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, id=None, created=None, updated=None, fields=None, **kwargs): + super(UpdateForTaskResponse, self).__init__(**kwargs) + self.id = id + self.created = created + self.updated = updated + self.fields = fields + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + @schema_property('created') + def created(self): + return self._property_created + + @created.setter + def created(self, value): + if value is None: + self._property_created = None + return + + self.assert_isinstance(value, "created", (bool,)) + self._property_created = value + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +response_mapping = { + GetByIdRequest: GetByIdResponse, + GetByTaskIdRequest: GetByTaskIdResponse, + GetAllRequest: GetAllResponse, + UpdateForTaskRequest: UpdateForTaskResponse, + CreateRequest: CreateResponse, + EditRequest: EditResponse, + UpdateRequest: UpdateResponse, + SetReadyRequest: SetReadyResponse, + DeleteRequest: DeleteResponse, +} diff --git a/trains/backend_api/services/v2_1/news.py b/trains/backend_api/services/v2_1/news.py new file mode 100644 index 00000000..34c22136 --- /dev/null +++ b/trains/backend_api/services/v2_1/news.py @@ -0,0 +1,70 @@ +""" +news service + +This service provides platform news. +""" +import six +import types +from datetime import datetime +import enum + +from dateutil.parser import parse as parse_datetime + +from ....backend_api.session import Request, BatchRequest, Response, DataModel, NonStrictDataModel, CompoundRequest, schema_property, StringEnum + + +class GetRequest(Request): + """ + Gets latest news link + + """ + + _service = "news" + _action = "get" + _version = "1.5" + _schema = {'definitions': {}, 'properties': {}, 'type': 'object'} + + +class GetResponse(Response): + """ + Response of news.get endpoint. 
+ + :param url: URL to news html file + :type url: str + """ + _service = "news" + _action = "get" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'url': { + 'description': 'URL to news html file', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, url=None, **kwargs): + super(GetResponse, self).__init__(**kwargs) + self.url = url + + @schema_property('url') + def url(self): + return self._property_url + + @url.setter + def url(self, value): + if value is None: + self._property_url = None + return + + self.assert_isinstance(value, "url", six.string_types) + self._property_url = value + + +response_mapping = { + GetRequest: GetResponse, +} diff --git a/trains/backend_api/services/v2_1/projects.py b/trains/backend_api/services/v2_1/projects.py new file mode 100644 index 00000000..f5ffb980 --- /dev/null +++ b/trains/backend_api/services/v2_1/projects.py @@ -0,0 +1,1847 @@ +""" +projects service + +Provides support for defining Projects containing Tasks, Models and Dataset Versions. +""" +import six +import types +from datetime import datetime +import enum + +from dateutil.parser import parse as parse_datetime + +from ....backend_api.session import Request, BatchRequest, Response, DataModel, NonStrictDataModel, CompoundRequest, schema_property, StringEnum + + +class MultiFieldPatternData(NonStrictDataModel): + """ + :param pattern: Pattern string (regex) + :type pattern: str + :param fields: List of field names + :type fields: Sequence[str] + """ + _schema = { + 'properties': { + 'fields': { + 'description': 'List of field names', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'pattern': { + 'description': 'Pattern string (regex)', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, pattern=None, fields=None, **kwargs): + super(MultiFieldPatternData, self).__init__(**kwargs) + self.pattern = pattern + self.fields = fields + + @schema_property('pattern') + def pattern(self): + return self._property_pattern + + @pattern.setter + def pattern(self, value): + if value is None: + self._property_pattern = None + return + + self.assert_isinstance(value, "pattern", six.string_types) + self._property_pattern = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (list, tuple)) + + self.assert_isinstance(value, "fields", six.string_types, is_array=True) + self._property_fields = value + + +class ProjectTagsEnum(StringEnum): + archived = "archived" + public = "public" + default = "default" + + +class Project(NonStrictDataModel): + """ + :param id: Project id + :type id: str + :param name: Project name + :type name: str + :param description: Project description + :type description: str + :param user: Associated user id + :type user: str + :param company: Company id + :type company: str + :param created: Creation time + :type created: datetime.datetime + :param tags: Tags + :type tags: Sequence[ProjectTagsEnum] + :param default_output_destination: The default output destination URL for new + tasks under this project + :type default_output_destination: str + :param last_update: Last project update time. 
Reflects the last time the + project metadata was changed or a task in this project has changed status + :type last_update: datetime.datetime + """ + _schema = { + 'properties': { + 'company': {'description': 'Company id', 'type': ['string', 'null']}, + 'created': { + 'description': 'Creation time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'default_output_destination': { + 'description': 'The default output destination URL for new tasks under this project', + 'type': ['string', 'null'], + }, + 'description': { + 'description': 'Project description', + 'type': ['string', 'null'], + }, + 'id': {'description': 'Project id', 'type': ['string', 'null']}, + 'last_update': { + 'description': 'Last project update time. Reflects the last time the project metadata was changed or a task in this project has changed status', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'name': {'description': 'Project name', 'type': ['string', 'null']}, + 'tags': { + 'description': 'Tags', + 'items': {'$ref': '#/definitions/project_tags_enum'}, + 'type': ['array', 'null'], + }, + 'user': { + 'description': 'Associated user id', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, id=None, name=None, description=None, user=None, company=None, created=None, tags=None, default_output_destination=None, last_update=None, **kwargs): + super(Project, self).__init__(**kwargs) + self.id = id + self.name = name + self.description = description + self.user = user + self.company = company + self.created = created + self.tags = tags + self.default_output_destination = default_output_destination + self.last_update = last_update + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('description') + def description(self): + return self._property_description + + @description.setter + def description(self, value): + if value is None: + self._property_description = None + return + + self.assert_isinstance(value, "description", six.string_types) + self._property_description = value + + @schema_property('user') + def user(self): + return self._property_user + + @user.setter + def user(self, value): + if value is None: + self._property_user = None + return + + self.assert_isinstance(value, "user", six.string_types) + self._property_user = value + + @schema_property('company') + def company(self): + return self._property_company + + @company.setter + def company(self, value): + if value is None: + self._property_company = None + return + + self.assert_isinstance(value, "company", six.string_types) + self._property_company = value + + @schema_property('created') + def created(self): + return self._property_created + + @created.setter + def created(self, value): + if value is None: + self._property_created = None + return + + self.assert_isinstance(value, "created", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_created = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def 
tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + if any(isinstance(v, six.string_types) for v in value): + value = [ProjectTagsEnum(v) if isinstance(v, six.string_types) else v for v in value] + else: + self.assert_isinstance(value, "tags", ProjectTagsEnum, is_array=True) + self._property_tags = value + + @schema_property('default_output_destination') + def default_output_destination(self): + return self._property_default_output_destination + + @default_output_destination.setter + def default_output_destination(self, value): + if value is None: + self._property_default_output_destination = None + return + + self.assert_isinstance(value, "default_output_destination", six.string_types) + self._property_default_output_destination = value + + @schema_property('last_update') + def last_update(self): + return self._property_last_update + + @last_update.setter + def last_update(self, value): + if value is None: + self._property_last_update = None + return + + self.assert_isinstance(value, "last_update", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_last_update = value + + +class StatsStatusCount(NonStrictDataModel): + """ + :param total_runtime: Total run time of all tasks in project (in seconds) + :type total_runtime: int + :param status_count: Status counts + :type status_count: dict + """ + _schema = { + 'properties': { + 'status_count': { + 'description': 'Status counts', + 'properties': { + 'closed': { + 'description': "Number of 'closed' tasks in project", + 'type': 'integer', + }, + 'created': { + 'description': "Number of 'created' tasks in project", + 'type': 'integer', + }, + 'failed': { + 'description': "Number of 'failed' tasks in project", + 'type': 'integer', + }, + 'in_progress': { + 'description': "Number of 'in_progress' tasks in project", + 'type': 'integer', + }, + 'published': { + 'description': "Number of 'published' tasks in project", + 'type': 'integer', + }, + 'queued': { + 'description': "Number of 'queued' tasks in project", + 'type': 'integer', + }, + 'stopped': { + 'description': "Number of 'stopped' tasks in project", + 'type': 'integer', + }, + 'unknown': { + 'description': "Number of 'unknown' tasks in project", + 'type': 'integer', + }, + }, + 'type': ['object', 'null'], + }, + 'total_runtime': { + 'description': 'Total run time of all tasks in project (in seconds)', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, total_runtime=None, status_count=None, **kwargs): + super(StatsStatusCount, self).__init__(**kwargs) + self.total_runtime = total_runtime + self.status_count = status_count + + @schema_property('total_runtime') + def total_runtime(self): + return self._property_total_runtime + + @total_runtime.setter + def total_runtime(self, value): + if value is None: + self._property_total_runtime = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "total_runtime", six.integer_types) + self._property_total_runtime = value + + @schema_property('status_count') + def status_count(self): + return self._property_status_count + + @status_count.setter + def status_count(self, value): + if value is None: + self._property_status_count = None + return + + self.assert_isinstance(value, "status_count", (dict,)) + self._property_status_count = value + + +class Stats(NonStrictDataModel): + """ + :param active: 
Stats for active tasks + :type active: StatsStatusCount + :param archived: Stats for archived tasks + :type archived: StatsStatusCount + """ + _schema = { + 'properties': { + 'active': { + 'description': 'Stats for active tasks', + 'oneOf': [ + {'$ref': '#/definitions/stats_status_count'}, + {'type': 'null'}, + ], + }, + 'archived': { + 'description': 'Stats for archived tasks', + 'oneOf': [ + {'$ref': '#/definitions/stats_status_count'}, + {'type': 'null'}, + ], + }, + }, + 'type': 'object', + } + def __init__( + self, active=None, archived=None, **kwargs): + super(Stats, self).__init__(**kwargs) + self.active = active + self.archived = archived + + @schema_property('active') + def active(self): + return self._property_active + + @active.setter + def active(self, value): + if value is None: + self._property_active = None + return + if isinstance(value, dict): + value = StatsStatusCount.from_dict(value) + else: + self.assert_isinstance(value, "active", StatsStatusCount) + self._property_active = value + + @schema_property('archived') + def archived(self): + return self._property_archived + + @archived.setter + def archived(self, value): + if value is None: + self._property_archived = None + return + if isinstance(value, dict): + value = StatsStatusCount.from_dict(value) + else: + self.assert_isinstance(value, "archived", StatsStatusCount) + self._property_archived = value + + +class ProjectsGetAllResponseSingle(NonStrictDataModel): + """ + :param id: Project id + :type id: str + :param name: Project name + :type name: str + :param description: Project description + :type description: str + :param user: Associated user id + :type user: str + :param company: Company id + :type company: str + :param created: Creation time + :type created: datetime.datetime + :param tags: Tags + :type tags: Sequence[ProjectTagsEnum] + :param default_output_destination: The default output destination URL for new + tasks under this project + :type default_output_destination: str + :param stats: Additional project stats + :type stats: Stats + """ + _schema = { + 'properties': { + 'company': {'description': 'Company id', 'type': ['string', 'null']}, + 'created': { + 'description': 'Creation time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'default_output_destination': { + 'description': 'The default output destination URL for new tasks under this project', + 'type': ['string', 'null'], + }, + 'description': { + 'description': 'Project description', + 'type': ['string', 'null'], + }, + 'id': {'description': 'Project id', 'type': ['string', 'null']}, + 'name': {'description': 'Project name', 'type': ['string', 'null']}, + 'stats': { + 'description': 'Additional project stats', + 'oneOf': [{'$ref': '#/definitions/stats'}, {'type': 'null'}], + }, + 'tags': { + 'description': 'Tags', + 'items': {'$ref': '#/definitions/project_tags_enum'}, + 'type': ['array', 'null'], + }, + 'user': { + 'description': 'Associated user id', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, id=None, name=None, description=None, user=None, company=None, created=None, tags=None, default_output_destination=None, stats=None, **kwargs): + super(ProjectsGetAllResponseSingle, self).__init__(**kwargs) + self.id = id + self.name = name + self.description = description + self.user = user + self.company = company + self.created = created + self.tags = tags + self.default_output_destination = default_output_destination + self.stats = stats + + @schema_property('id') + def id(self): + return 
self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('description') + def description(self): + return self._property_description + + @description.setter + def description(self, value): + if value is None: + self._property_description = None + return + + self.assert_isinstance(value, "description", six.string_types) + self._property_description = value + + @schema_property('user') + def user(self): + return self._property_user + + @user.setter + def user(self, value): + if value is None: + self._property_user = None + return + + self.assert_isinstance(value, "user", six.string_types) + self._property_user = value + + @schema_property('company') + def company(self): + return self._property_company + + @company.setter + def company(self, value): + if value is None: + self._property_company = None + return + + self.assert_isinstance(value, "company", six.string_types) + self._property_company = value + + @schema_property('created') + def created(self): + return self._property_created + + @created.setter + def created(self, value): + if value is None: + self._property_created = None + return + + self.assert_isinstance(value, "created", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_created = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + if any(isinstance(v, six.string_types) for v in value): + value = [ProjectTagsEnum(v) if isinstance(v, six.string_types) else v for v in value] + else: + self.assert_isinstance(value, "tags", ProjectTagsEnum, is_array=True) + self._property_tags = value + + @schema_property('default_output_destination') + def default_output_destination(self): + return self._property_default_output_destination + + @default_output_destination.setter + def default_output_destination(self, value): + if value is None: + self._property_default_output_destination = None + return + + self.assert_isinstance(value, "default_output_destination", six.string_types) + self._property_default_output_destination = value + + @schema_property('stats') + def stats(self): + return self._property_stats + + @stats.setter + def stats(self, value): + if value is None: + self._property_stats = None + return + if isinstance(value, dict): + value = Stats.from_dict(value) + else: + self.assert_isinstance(value, "stats", Stats) + self._property_stats = value + + +class MetricVariantResult(NonStrictDataModel): + """ + :param metric: Metric name + :type metric: str + :param metric_hash: Metric name hash. Used instead of the metric name when + categorizing last metrics events in task objects. + :type metric_hash: str + :param variant: Variant name + :type variant: str + :param variant_hash: Variant name hash. Used instead of the variant name when + categorizing last metrics events in task objects. 
+ :type variant_hash: str + """ + _schema = { + 'properties': { + 'metric': {'description': 'Metric name', 'type': ['string', 'null']}, + 'metric_hash': { + 'description': 'Metric name hash. Used instead of the metric name when categorizing\n last metrics events in task objects.', + 'type': ['string', 'null'], + }, + 'variant': {'description': 'Variant name', 'type': ['string', 'null']}, + 'variant_hash': { + 'description': 'Variant name hash. Used instead of the variant name when categorizing\n last metrics events in task objects.', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, metric=None, metric_hash=None, variant=None, variant_hash=None, **kwargs): + super(MetricVariantResult, self).__init__(**kwargs) + self.metric = metric + self.metric_hash = metric_hash + self.variant = variant + self.variant_hash = variant_hash + + @schema_property('metric') + def metric(self): + return self._property_metric + + @metric.setter + def metric(self, value): + if value is None: + self._property_metric = None + return + + self.assert_isinstance(value, "metric", six.string_types) + self._property_metric = value + + @schema_property('metric_hash') + def metric_hash(self): + return self._property_metric_hash + + @metric_hash.setter + def metric_hash(self, value): + if value is None: + self._property_metric_hash = None + return + + self.assert_isinstance(value, "metric_hash", six.string_types) + self._property_metric_hash = value + + @schema_property('variant') + def variant(self): + return self._property_variant + + @variant.setter + def variant(self, value): + if value is None: + self._property_variant = None + return + + self.assert_isinstance(value, "variant", six.string_types) + self._property_variant = value + + @schema_property('variant_hash') + def variant_hash(self): + return self._property_variant_hash + + @variant_hash.setter + def variant_hash(self, value): + if value is None: + self._property_variant_hash = None + return + + self.assert_isinstance(value, "variant_hash", six.string_types) + self._property_variant_hash = value + + +class CreateRequest(Request): + """ + Create a new project + + :param name: Project name Unique within the company. + :type name: str + :param description: Project description. + :type description: str + :param tags: Tags + :type tags: Sequence[ProjectTagsEnum] + :param default_output_destination: The default output destination URL for new + tasks under this project + :type default_output_destination: str + """ + + _service = "projects" + _action = "create" + _version = "1.5" + _schema = { + 'definitions': { + 'project_tags_enum': {'enum': ['archived', 'public', 'default'], 'type': 'string'}, + }, + 'properties': { + 'default_output_destination': { + 'description': 'The default output destination URL for new tasks under this project', + 'type': 'string', + }, + 'description': { + 'description': 'Project description. 
', + 'type': 'string', + }, + 'name': { + 'description': 'Project name Unique within the company.', + 'type': 'string', + }, + 'tags': { + 'description': 'Tags', + 'items': {'$ref': '#/definitions/project_tags_enum'}, + 'type': 'array', + }, + }, + 'required': ['name', 'description'], + 'type': 'object', + } + def __init__( + self, name, description, tags=None, default_output_destination=None, **kwargs): + super(CreateRequest, self).__init__(**kwargs) + self.name = name + self.description = description + self.tags = tags + self.default_output_destination = default_output_destination + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('description') + def description(self): + return self._property_description + + @description.setter + def description(self, value): + if value is None: + self._property_description = None + return + + self.assert_isinstance(value, "description", six.string_types) + self._property_description = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + if any(isinstance(v, six.string_types) for v in value): + value = [ProjectTagsEnum(v) if isinstance(v, six.string_types) else v for v in value] + else: + self.assert_isinstance(value, "tags", ProjectTagsEnum, is_array=True) + self._property_tags = value + + @schema_property('default_output_destination') + def default_output_destination(self): + return self._property_default_output_destination + + @default_output_destination.setter + def default_output_destination(self, value): + if value is None: + self._property_default_output_destination = None + return + + self.assert_isinstance(value, "default_output_destination", six.string_types) + self._property_default_output_destination = value + + +class CreateResponse(Response): + """ + Response of projects.create endpoint. + + :param id: Project id + :type id: str + """ + _service = "projects" + _action = "create" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'id': {'description': 'Project id', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, id=None, **kwargs): + super(CreateResponse, self).__init__(**kwargs) + self.id = id + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + +class DeleteRequest(Request): + """ + Deletes a project + + :param project: Project id + :type project: str + :param force: If not true, fails if project has tasks. 
If true, and project has + tasks, they will be unassigned + :type force: bool + """ + + _service = "projects" + _action = "delete" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'force': { + 'default': False, + 'description': 'If not true, fails if project has tasks.\n If true, and project has tasks, they will be unassigned', + 'type': 'boolean', + }, + 'project': {'description': 'Project id', 'type': 'string'}, + }, + 'required': ['project'], + 'type': 'object', + } + def __init__( + self, project, force=False, **kwargs): + super(DeleteRequest, self).__init__(**kwargs) + self.project = project + self.force = force + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + @schema_property('force') + def force(self): + return self._property_force + + @force.setter + def force(self, value): + if value is None: + self._property_force = None + return + + self.assert_isinstance(value, "force", (bool,)) + self._property_force = value + + +class DeleteResponse(Response): + """ + Response of projects.delete endpoint. + + :param deleted: Number of projects deleted (0 or 1) + :type deleted: int + :param disassociated_tasks: Number of tasks disassociated from the deleted + project + :type disassociated_tasks: int + """ + _service = "projects" + _action = "delete" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'deleted': { + 'description': 'Number of projects deleted (0 or 1)', + 'type': ['integer', 'null'], + }, + 'disassociated_tasks': { + 'description': 'Number of tasks disassociated from the deleted project', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, deleted=None, disassociated_tasks=None, **kwargs): + super(DeleteResponse, self).__init__(**kwargs) + self.deleted = deleted + self.disassociated_tasks = disassociated_tasks + + @schema_property('deleted') + def deleted(self): + return self._property_deleted + + @deleted.setter + def deleted(self, value): + if value is None: + self._property_deleted = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "deleted", six.integer_types) + self._property_deleted = value + + @schema_property('disassociated_tasks') + def disassociated_tasks(self): + return self._property_disassociated_tasks + + @disassociated_tasks.setter + def disassociated_tasks(self, value): + if value is None: + self._property_disassociated_tasks = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "disassociated_tasks", six.integer_types) + self._property_disassociated_tasks = value + + +class GetAllRequest(Request): + """ + Get all the company's projects and all public projects + + :param id: List of IDs to filter by + :type id: Sequence[str] + :param name: Get only projects whose name matches this pattern (python regular + expression syntax) + :type name: str + :param description: Get only projects whose description matches this pattern + (python regular expression syntax) + :type description: str + :param tags: Tags list used to filter results. Prepend '-' to tag name to + indicate exclusion + :type tags: Sequence[str] + :param order_by: List of field names to order by. 
When search_text is used, + '@text_score' can be used as a field representing the text score of returned + documents. Use '-' prefix to specify descending order. Optional, recommended + when using page + :type order_by: Sequence[str] + :param page: Page number, returns a specific page out of the resulting list of + projects + :type page: int + :param page_size: Page size, specifies the number of results returned in each + page (last page may contain fewer results) + :type page_size: int + :param search_text: Free text search query + :type search_text: str + :param only_fields: List of document's field names (nesting is supported using + '.', e.g. execution.model_labels). If provided, this list defines the query's + projection (only these fields will be returned for each result entry) + :type only_fields: Sequence[str] + :param _all_: Multi-field pattern condition (all fields match pattern) + :type _all_: MultiFieldPatternData + :param _any_: Multi-field pattern condition (any field matches pattern) + :type _any_: MultiFieldPatternData + """ + + _service = "projects" + _action = "get_all" + _version = "1.5" + _schema = { + 'definitions': { + 'multi_field_pattern_data': { + 'properties': { + 'fields': { + 'description': 'List of field names', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'pattern': { + 'description': 'Pattern string (regex)', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + '_all_': { + 'description': 'Multi-field pattern condition (all fields match pattern)', + 'oneOf': [ + {'$ref': '#/definitions/multi_field_pattern_data'}, + {'type': 'null'}, + ], + }, + '_any_': { + 'description': 'Multi-field pattern condition (any field matches pattern)', + 'oneOf': [ + {'$ref': '#/definitions/multi_field_pattern_data'}, + {'type': 'null'}, + ], + }, + 'description': { + 'description': 'Get only projects whose description matches this pattern (python regular expression syntax)', + 'type': ['string', 'null'], + }, + 'id': { + 'description': 'List of IDs to filter by', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'name': { + 'description': 'Get only projects whose name matches this pattern (python regular expression syntax)', + 'type': ['string', 'null'], + }, + 'only_fields': { + 'description': "List of document's field names (nesting is supported using '.', e.g. execution.model_labels). If provided, this list defines the query's projection (only these fields will be returned for each result entry)", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'order_by': { + 'description': "List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. Optional, recommended when using page", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'page': { + 'description': 'Page number, returns a specific page out of the resulting list of projects', + 'minimum': 0, + 'type': ['integer', 'null'], + }, + 'page_size': { + 'description': 'Page size, specifies the number of results returned in each page (last page may contain fewer results)', + 'minimum': 1, + 'type': ['integer', 'null'], + }, + 'search_text': { + 'description': 'Free text search query', + 'type': ['string', 'null'], + }, + 'tags': { + 'description': "Tags list used to filter results.
Prepend '-' to tag name to indicate exclusion", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, id=None, name=None, description=None, tags=None, order_by=None, page=None, page_size=None, search_text=None, only_fields=None, _all_=None, _any_=None, **kwargs): + super(GetAllRequest, self).__init__(**kwargs) + self.id = id + self.name = name + self.description = description + self.tags = tags + self.order_by = order_by + self.page = page + self.page_size = page_size + self.search_text = search_text + self.only_fields = only_fields + self._all_ = _all_ + self._any_ = _any_ + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", (list, tuple)) + + self.assert_isinstance(value, "id", six.string_types, is_array=True) + self._property_id = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('description') + def description(self): + return self._property_description + + @description.setter + def description(self, value): + if value is None: + self._property_description = None + return + + self.assert_isinstance(value, "description", six.string_types) + self._property_description = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('order_by') + def order_by(self): + return self._property_order_by + + @order_by.setter + def order_by(self, value): + if value is None: + self._property_order_by = None + return + + self.assert_isinstance(value, "order_by", (list, tuple)) + + self.assert_isinstance(value, "order_by", six.string_types, is_array=True) + self._property_order_by = value + + @schema_property('page') + def page(self): + return self._property_page + + @page.setter + def page(self, value): + if value is None: + self._property_page = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "page", six.integer_types) + self._property_page = value + + @schema_property('page_size') + def page_size(self): + return self._property_page_size + + @page_size.setter + def page_size(self, value): + if value is None: + self._property_page_size = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "page_size", six.integer_types) + self._property_page_size = value + + @schema_property('search_text') + def search_text(self): + return self._property_search_text + + @search_text.setter + def search_text(self, value): + if value is None: + self._property_search_text = None + return + + self.assert_isinstance(value, "search_text", six.string_types) + self._property_search_text = value + + @schema_property('only_fields') + def only_fields(self): + return self._property_only_fields + + @only_fields.setter + def only_fields(self, value): + if value is None: + self._property_only_fields = None + return + + 
self.assert_isinstance(value, "only_fields", (list, tuple)) + + self.assert_isinstance(value, "only_fields", six.string_types, is_array=True) + self._property_only_fields = value + + @schema_property('_all_') + def _all_(self): + return self._property__all_ + + @_all_.setter + def _all_(self, value): + if value is None: + self._property__all_ = None + return + if isinstance(value, dict): + value = MultiFieldPatternData.from_dict(value) + else: + self.assert_isinstance(value, "_all_", MultiFieldPatternData) + self._property__all_ = value + + @schema_property('_any_') + def _any_(self): + return self._property__any_ + + @_any_.setter + def _any_(self, value): + if value is None: + self._property__any_ = None + return + if isinstance(value, dict): + value = MultiFieldPatternData.from_dict(value) + else: + self.assert_isinstance(value, "_any_", MultiFieldPatternData) + self._property__any_ = value + + +class GetAllResponse(Response): + """ + Response of projects.get_all endpoint. + + :param projects: Projects list + :type projects: Sequence[ProjectsGetAllResponseSingle] + """ + _service = "projects" + _action = "get_all" + _version = "1.5" + + _schema = { + 'definitions': { + 'project_tags_enum': {'enum': ['archived', 'public', 'default'], 'type': 'string'}, + 'projects_get_all_response_single': { + 'properties': { + 'company': { + 'description': 'Company id', + 'type': ['string', 'null'], + }, + 'created': { + 'description': 'Creation time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'default_output_destination': { + 'description': 'The default output destination URL for new tasks under this project', + 'type': ['string', 'null'], + }, + 'description': { + 'description': 'Project description', + 'type': ['string', 'null'], + }, + 'id': {'description': 'Project id', 'type': ['string', 'null']}, + 'name': { + 'description': 'Project name', + 'type': ['string', 'null'], + }, + 'stats': { + 'description': 'Additional project stats', + 'oneOf': [ + {'$ref': '#/definitions/stats'}, + {'type': 'null'}, + ], + }, + 'tags': { + 'description': 'Tags', + 'items': {'$ref': '#/definitions/project_tags_enum'}, + 'type': ['array', 'null'], + }, + 'user': { + 'description': 'Associated user id', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'stats': { + 'properties': { + 'active': { + 'description': 'Stats for active tasks', + 'oneOf': [ + {'$ref': '#/definitions/stats_status_count'}, + {'type': 'null'}, + ], + }, + 'archived': { + 'description': 'Stats for archived tasks', + 'oneOf': [ + {'$ref': '#/definitions/stats_status_count'}, + {'type': 'null'}, + ], + }, + }, + 'type': 'object', + }, + 'stats_status_count': { + 'properties': { + 'status_count': { + 'description': 'Status counts', + 'properties': { + 'closed': { + 'description': "Number of 'closed' tasks in project", + 'type': 'integer', + }, + 'created': { + 'description': "Number of 'created' tasks in project", + 'type': 'integer', + }, + 'failed': { + 'description': "Number of 'failed' tasks in project", + 'type': 'integer', + }, + 'in_progress': { + 'description': "Number of 'in_progress' tasks in project", + 'type': 'integer', + }, + 'published': { + 'description': "Number of 'published' tasks in project", + 'type': 'integer', + }, + 'queued': { + 'description': "Number of 'queued' tasks in project", + 'type': 'integer', + }, + 'stopped': { + 'description': "Number of 'stopped' tasks in project", + 'type': 'integer', + }, + 'unknown': { + 'description': "Number of 'unknown' tasks in project", + 'type': 
'integer', + }, + }, + 'type': ['object', 'null'], + }, + 'total_runtime': { + 'description': 'Total run time of all tasks in project (in seconds)', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'projects': { + 'description': 'Projects list', + 'items': { + '$ref': '#/definitions/projects_get_all_response_single', + }, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, projects=None, **kwargs): + super(GetAllResponse, self).__init__(**kwargs) + self.projects = projects + + @schema_property('projects') + def projects(self): + return self._property_projects + + @projects.setter + def projects(self, value): + if value is None: + self._property_projects = None + return + + self.assert_isinstance(value, "projects", (list, tuple)) + if any(isinstance(v, dict) for v in value): + value = [ProjectsGetAllResponseSingle.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "projects", ProjectsGetAllResponseSingle, is_array=True) + self._property_projects = value + + +class GetByIdRequest(Request): + """ + :param project: Project id + :type project: str + """ + + _service = "projects" + _action = "get_by_id" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': {'project': {'description': 'Project id', 'type': 'string'}}, + 'required': ['project'], + 'type': 'object', + } + def __init__( + self, project, **kwargs): + super(GetByIdRequest, self).__init__(**kwargs) + self.project = project + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + +class GetByIdResponse(Response): + """ + Response of projects.get_by_id endpoint. + + :param project: Project info + :type project: Project + """ + _service = "projects" + _action = "get_by_id" + _version = "1.5" + + _schema = { + 'definitions': { + 'project': { + 'properties': { + 'company': { + 'description': 'Company id', + 'type': ['string', 'null'], + }, + 'created': { + 'description': 'Creation time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'default_output_destination': { + 'description': 'The default output destination URL for new tasks under this project', + 'type': ['string', 'null'], + }, + 'description': { + 'description': 'Project description', + 'type': ['string', 'null'], + }, + 'id': {'description': 'Project id', 'type': ['string', 'null']}, + 'last_update': { + 'description': 'Last project update time. 
Reflects the last time the project metadata was changed or a task in this project has changed status', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'name': { + 'description': 'Project name', + 'type': ['string', 'null'], + }, + 'tags': { + 'description': 'Tags', + 'items': {'$ref': '#/definitions/project_tags_enum'}, + 'type': ['array', 'null'], + }, + 'user': { + 'description': 'Associated user id', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'project_tags_enum': {'enum': ['archived', 'public', 'default'], 'type': 'string'}, + }, + 'properties': { + 'project': { + 'description': 'Project info', + 'oneOf': [{'$ref': '#/definitions/project'}, {'type': 'null'}], + }, + }, + 'type': 'object', + } + def __init__( + self, project=None, **kwargs): + super(GetByIdResponse, self).__init__(**kwargs) + self.project = project + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + if isinstance(value, dict): + value = Project.from_dict(value) + else: + self.assert_isinstance(value, "project", Project) + self._property_project = value + + +class GetUniqueMetricVariantsRequest(Request): + """ + Get all metric/variant pairs reported for tasks in a specific project. + If no project is specified, metric/variant pairs reported for all tasks will be returned. + If the project does not exist, an empty list will be returned. + + :param project: Project ID + :type project: str + """ + + _service = "projects" + _action = "get_unique_metric_variants" + _version = "1.6" + _schema = { + 'definitions': {}, + 'properties': { + 'project': {'description': 'Project ID', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, project=None, **kwargs): + super(GetUniqueMetricVariantsRequest, self).__init__(**kwargs) + self.project = project + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + +class GetUniqueMetricVariantsResponse(Response): + """ + Response of projects.get_unique_metric_variants endpoint. + + :param metrics: A list of metric variants reported for tasks in this project + :type metrics: Sequence[MetricVariantResult] + """ + _service = "projects" + _action = "get_unique_metric_variants" + _version = "1.6" + + _schema = { + 'definitions': { + 'metric_variant_result': { + 'properties': { + 'metric': { + 'description': 'Metric name', + 'type': ['string', 'null'], + }, + 'metric_hash': { + 'description': 'Metric name hash. Used instead of the metric name when categorizing\n last metrics events in task objects.', + 'type': ['string', 'null'], + }, + 'variant': { + 'description': 'Variant name', + 'type': ['string', 'null'], + }, + 'variant_hash': { + 'description': 'Variant name hash.
Used instead of the variant name when categorizing\n last metrics events in task objects.', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'metrics': { + 'description': 'A list of metric variants reported for tasks in this project', + 'items': {'$ref': '#/definitions/metric_variant_result'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, metrics=None, **kwargs): + super(GetUniqueMetricVariantsResponse, self).__init__(**kwargs) + self.metrics = metrics + + @schema_property('metrics') + def metrics(self): + return self._property_metrics + + @metrics.setter + def metrics(self, value): + if value is None: + self._property_metrics = None + return + + self.assert_isinstance(value, "metrics", (list, tuple)) + if any(isinstance(v, dict) for v in value): + value = [MetricVariantResult.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "metrics", MetricVariantResult, is_array=True) + self._property_metrics = value + + +class UpdateRequest(Request): + """ + Update project information + + :param project: Project id + :type project: str + :param name: Project name. Unique within the company. + :type name: str + :param description: Project description + :type description: str + :param tags: Tags list + :type tags: Sequence[str] + :param default_output_destination: The default output destination URL for new + tasks under this project + :type default_output_destination: str + """ + + _service = "projects" + _action = "update" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'default_output_destination': { + 'description': 'The default output destination URL for new tasks under this project', + 'type': 'string', + }, + 'description': { + 'description': 'Project description', + 'type': 'string', + }, + 'name': { + 'description': 'Project name. 
Unique within the company.', + 'type': 'string', + }, + 'project': {'description': 'Project id', 'type': 'string'}, + 'tags': {'description': 'Tags list', 'items': {'type': 'string'}, 'type': 'array'}, + }, + 'required': ['project'], + 'type': 'object', + } + def __init__( + self, project, name=None, description=None, tags=None, default_output_destination=None, **kwargs): + super(UpdateRequest, self).__init__(**kwargs) + self.project = project + self.name = name + self.description = description + self.tags = tags + self.default_output_destination = default_output_destination + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('description') + def description(self): + return self._property_description + + @description.setter + def description(self, value): + if value is None: + self._property_description = None + return + + self.assert_isinstance(value, "description", six.string_types) + self._property_description = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('default_output_destination') + def default_output_destination(self): + return self._property_default_output_destination + + @default_output_destination.setter + def default_output_destination(self, value): + if value is None: + self._property_default_output_destination = None + return + + self.assert_isinstance(value, "default_output_destination", six.string_types) + self._property_default_output_destination = value + + +class UpdateResponse(Response): + """ + Response of projects.update endpoint. 
+ + :param updated: Number of projects updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "projects" + _action = "update" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of projects updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, fields=None, **kwargs): + super(UpdateResponse, self).__init__(**kwargs) + self.updated = updated + self.fields = fields + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +response_mapping = { + CreateRequest: CreateResponse, + GetByIdRequest: GetByIdResponse, + GetAllRequest: GetAllResponse, + UpdateRequest: UpdateResponse, + DeleteRequest: DeleteResponse, + GetUniqueMetricVariantsRequest: GetUniqueMetricVariantsResponse, +} diff --git a/trains/backend_api/services/v2_1/storage.py b/trains/backend_api/services/v2_1/storage.py new file mode 100644 index 00000000..6a63da23 --- /dev/null +++ b/trains/backend_api/services/v2_1/storage.py @@ -0,0 +1,681 @@ +""" +storage service + +Provides a management API for customer-associated storage locations +""" +import six +import types +from datetime import datetime +import enum + +from dateutil.parser import parse as parse_datetime + +from ....backend_api.session import Request, BatchRequest, Response, DataModel, NonStrictDataModel, CompoundRequest, schema_property, StringEnum + + +class Credentials(NonStrictDataModel): + """ + :param access_key: Credentials access key + :type access_key: str + :param secret_key: Credentials secret key + :type secret_key: str + """ + _schema = { + 'properties': { + 'access_key': { + 'description': 'Credentials access key', + 'type': ['string', 'null'], + }, + 'secret_key': { + 'description': 'Credentials secret key', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, access_key=None, secret_key=None, **kwargs): + super(Credentials, self).__init__(**kwargs) + self.access_key = access_key + self.secret_key = secret_key + + @schema_property('access_key') + def access_key(self): + return self._property_access_key + + @access_key.setter + def access_key(self, value): + if value is None: + self._property_access_key = None + return + + self.assert_isinstance(value, "access_key", six.string_types) + self._property_access_key = value + + @schema_property('secret_key') + def secret_key(self): + return self._property_secret_key + + @secret_key.setter + def secret_key(self, value): + if value is None: + self._property_secret_key = None + return + + self.assert_isinstance(value, "secret_key", six.string_types) + self._property_secret_key = value + + +class Storage(NonStrictDataModel): + """ + :param id: Entry ID + :type id: 
str + :param name: Entry name + :type name: str + :param company: Company ID + :type company: str + :param created: Entry creation time + :type created: datetime.datetime + :param uri: Storage URI + :type uri: str + :param credentials: Credentials required for accessing the storage + :type credentials: Credentials + """ + _schema = { + 'properties': { + 'company': {'description': 'Company ID', 'type': ['string', 'null']}, + 'created': { + 'description': 'Entry creation time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'credentials': { + 'description': 'Credentials required for accessing the storage', + 'oneOf': [{'$ref': '#/definitions/credentials'}, {'type': 'null'}], + }, + 'id': {'description': 'Entry ID', 'type': ['string', 'null']}, + 'name': {'description': 'Entry name', 'type': ['string', 'null']}, + 'uri': {'description': 'Storage URI', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, id=None, name=None, company=None, created=None, uri=None, credentials=None, **kwargs): + super(Storage, self).__init__(**kwargs) + self.id = id + self.name = name + self.company = company + self.created = created + self.uri = uri + self.credentials = credentials + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('company') + def company(self): + return self._property_company + + @company.setter + def company(self, value): + if value is None: + self._property_company = None + return + + self.assert_isinstance(value, "company", six.string_types) + self._property_company = value + + @schema_property('created') + def created(self): + return self._property_created + + @created.setter + def created(self, value): + if value is None: + self._property_created = None + return + + self.assert_isinstance(value, "created", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_created = value + + @schema_property('uri') + def uri(self): + return self._property_uri + + @uri.setter + def uri(self, value): + if value is None: + self._property_uri = None + return + + self.assert_isinstance(value, "uri", six.string_types) + self._property_uri = value + + @schema_property('credentials') + def credentials(self): + return self._property_credentials + + @credentials.setter + def credentials(self, value): + if value is None: + self._property_credentials = None + return + if isinstance(value, dict): + value = Credentials.from_dict(value) + else: + self.assert_isinstance(value, "credentials", Credentials) + self._property_credentials = value + + +class CreateRequest(Request): + """ + Create a new storage entry + + :param name: Storage name + :type name: str + :param uri: Storage URI + :type uri: str + :param credentials: Credentials required for accessing the storage + :type credentials: Credentials + :param company: Company under which to add this storage. Only valid for users + with the root or system role, otherwise the calling user's company will be + used. 
+ :type company: str + """ + + _service = "storage" + _action = "create" + _version = "1.5" + _schema = { + 'definitions': { + 'credentials': { + 'properties': { + 'access_key': { + 'description': 'Credentials access key', + 'type': ['string', 'null'], + }, + 'secret_key': { + 'description': 'Credentials secret key', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'company': { + 'description': "Company under which to add this storage. Only valid for users with the root or system role, otherwise the calling user's company will be used.", + 'type': 'string', + }, + 'credentials': { + '$ref': '#/definitions/credentials', + 'description': 'Credentials required for accessing the storage', + }, + 'name': {'description': 'Storage name', 'type': ['string', 'null']}, + 'uri': {'description': 'Storage URI', 'type': 'string'}, + }, + 'required': ['uri'], + 'type': 'object', + } + def __init__( + self, uri, name=None, credentials=None, company=None, **kwargs): + super(CreateRequest, self).__init__(**kwargs) + self.name = name + self.uri = uri + self.credentials = credentials + self.company = company + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('uri') + def uri(self): + return self._property_uri + + @uri.setter + def uri(self, value): + if value is None: + self._property_uri = None + return + + self.assert_isinstance(value, "uri", six.string_types) + self._property_uri = value + + @schema_property('credentials') + def credentials(self): + return self._property_credentials + + @credentials.setter + def credentials(self, value): + if value is None: + self._property_credentials = None + return + if isinstance(value, dict): + value = Credentials.from_dict(value) + else: + self.assert_isinstance(value, "credentials", Credentials) + self._property_credentials = value + + @schema_property('company') + def company(self): + return self._property_company + + @company.setter + def company(self, value): + if value is None: + self._property_company = None + return + + self.assert_isinstance(value, "company", six.string_types) + self._property_company = value + + +class CreateResponse(Response): + """ + Response of storage.create endpoint. 
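+
+    A minimal usage sketch (editor's note, hedged): assuming a pre-configured
+    ``Session`` instance named ``session`` (as provided by
+    ``trains.backend_api.session``) whose ``send()`` returns a parsed call
+    result, creating a storage entry and reading its new ID could look like::
+
+        req = CreateRequest(uri='s3://my-bucket/datasets', name='datasets')
+        result = session.send(req)      # 'session' is an assumed, pre-built Session
+        new_id = result.response.id     # the 'id' field of this CreateResponse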
+ + :param id: New storage ID + :type id: str + """ + _service = "storage" + _action = "create" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'id': {'description': 'New storage ID', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, id=None, **kwargs): + super(CreateResponse, self).__init__(**kwargs) + self.id = id + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + +class DeleteRequest(Request): + """ + Deletes a storage entry + + :param storage: Storage entry ID + :type storage: str + """ + + _service = "storage" + _action = "delete" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'storage': {'description': 'Storage entry ID', 'type': 'string'}, + }, + 'required': ['storage'], + 'type': 'object', + } + def __init__( + self, storage, **kwargs): + super(DeleteRequest, self).__init__(**kwargs) + self.storage = storage + + @schema_property('storage') + def storage(self): + return self._property_storage + + @storage.setter + def storage(self, value): + if value is None: + self._property_storage = None + return + + self.assert_isinstance(value, "storage", six.string_types) + self._property_storage = value + + +class DeleteResponse(Response): + """ + Response of storage.delete endpoint. + + :param deleted: Number of storage entries deleted (0 or 1) + :type deleted: int + """ + _service = "storage" + _action = "delete" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'deleted': { + 'description': 'Number of storage entries deleted (0 or 1)', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, deleted=None, **kwargs): + super(DeleteResponse, self).__init__(**kwargs) + self.deleted = deleted + + @schema_property('deleted') + def deleted(self): + return self._property_deleted + + @deleted.setter + def deleted(self, value): + if value is None: + self._property_deleted = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "deleted", six.integer_types) + self._property_deleted = value + + +class GetAllRequest(Request): + """ + Get all storage entries + + :param name: Get only storage entries whose name matches this pattern (python + regular expression syntax) + :type name: str + :param id: List of Storage IDs used to filter results + :type id: Sequence[str] + :param page: Page number, returns a specific page out of the result list of + results. + :type page: int + :param page_size: Page size, specifies the number of results returned in each + page (last page may contain fewer results) + :type page_size: int + :param order_by: List of field names to order by. When search_text is used, + '@text_score' can be used as a field representing the text score of returned + documents. Use '-' prefix to specify descending order. Optional, recommended + when using page + :type order_by: Sequence[str] + :param only_fields: List of document field names (nesting is supported using + '.', e.g. execution.model_labels). 
If provided, this list defines the query's + projection (only these fields will be returned for each result entry) + :type only_fields: Sequence[str] + """ + + _service = "storage" + _action = "get_all" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'id': { + 'description': 'List of Storage IDs used to filter results', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'name': { + 'description': 'Get only storage entries whose name matches this pattern (python regular expression syntax)', + 'type': ['string', 'null'], + }, + 'only_fields': { + 'description': "List of document field names (nesting is supported using '.', e.g. execution.model_labels). If provided, this list defines the query's projection (only these fields will be returned for each result entry)", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'order_by': { + 'description': "List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. Optional, recommended when using page", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'page': { + 'description': 'Page number, returns a specific page out of the result list of results.', + 'minimum': 0, + 'type': ['integer', 'null'], + }, + 'page_size': { + 'description': 'Page size, specifies the number of results returned in each page (last page may contain fewer results)', + 'minimum': 1, + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, name=None, id=None, page=None, page_size=None, order_by=None, only_fields=None, **kwargs): + super(GetAllRequest, self).__init__(**kwargs) + self.name = name + self.id = id + self.page = page + self.page_size = page_size + self.order_by = order_by + self.only_fields = only_fields + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", (list, tuple)) + + self.assert_isinstance(value, "id", six.string_types, is_array=True) + self._property_id = value + + @schema_property('page') + def page(self): + return self._property_page + + @page.setter + def page(self, value): + if value is None: + self._property_page = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "page", six.integer_types) + self._property_page = value + + @schema_property('page_size') + def page_size(self): + return self._property_page_size + + @page_size.setter + def page_size(self, value): + if value is None: + self._property_page_size = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "page_size", six.integer_types) + self._property_page_size = value + + @schema_property('order_by') + def order_by(self): + return self._property_order_by + + @order_by.setter + def order_by(self, value): + if value is None: + self._property_order_by = None + return + + self.assert_isinstance(value, "order_by", (list, tuple)) + + self.assert_isinstance(value, "order_by", six.string_types, is_array=True) + 
self._property_order_by = value + + @schema_property('only_fields') + def only_fields(self): + return self._property_only_fields + + @only_fields.setter + def only_fields(self, value): + if value is None: + self._property_only_fields = None + return + + self.assert_isinstance(value, "only_fields", (list, tuple)) + + self.assert_isinstance(value, "only_fields", six.string_types, is_array=True) + self._property_only_fields = value + + +class GetAllResponse(Response): + """ + Response of storage.get_all endpoint. + + :param results: Storage entries list + :type results: Sequence[Storage] + """ + _service = "storage" + _action = "get_all" + _version = "1.5" + + _schema = { + 'definitions': { + 'credentials': { + 'properties': { + 'access_key': { + 'description': 'Credentials access key', + 'type': ['string', 'null'], + }, + 'secret_key': { + 'description': 'Credentials secret key', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'storage': { + 'properties': { + 'company': { + 'description': 'Company ID', + 'type': ['string', 'null'], + }, + 'created': { + 'description': 'Entry creation time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'credentials': { + 'description': 'Credentials required for accessing the storage', + 'oneOf': [ + {'$ref': '#/definitions/credentials'}, + {'type': 'null'}, + ], + }, + 'id': {'description': 'Entry ID', 'type': ['string', 'null']}, + 'name': { + 'description': 'Entry name', + 'type': ['string', 'null'], + }, + 'uri': { + 'description': 'Storage URI', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'results': { + 'description': 'Storage entries list', + 'items': {'$ref': '#/definitions/storage'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, results=None, **kwargs): + super(GetAllResponse, self).__init__(**kwargs) + self.results = results + + @schema_property('results') + def results(self): + return self._property_results + + @results.setter + def results(self, value): + if value is None: + self._property_results = None + return + + self.assert_isinstance(value, "results", (list, tuple)) + if any(isinstance(v, dict) for v in value): + value = [Storage.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "results", Storage, is_array=True) + self._property_results = value + + +response_mapping = { + GetAllRequest: GetAllResponse, + CreateRequest: CreateResponse, + DeleteRequest: DeleteResponse, +} diff --git a/trains/backend_api/services/v2_1/tasks.py b/trains/backend_api/services/v2_1/tasks.py new file mode 100644 index 00000000..e4b96ea3 --- /dev/null +++ b/trains/backend_api/services/v2_1/tasks.py @@ -0,0 +1,8460 @@ +""" +tasks service + +Provides a management API for tasks in the system. +""" +import six +import types +from datetime import datetime +import enum + +from dateutil.parser import parse as parse_datetime + +from ....backend_api.session import Request, BatchRequest, Response, DataModel, NonStrictDataModel, CompoundRequest, schema_property, StringEnum + + +class FilterByRoiEnum(StringEnum): + disabled = "disabled" + no_rois = "no_rois" + label_rules = "label_rules" + + +class FilterLabelRule(NonStrictDataModel): + """ + :param label: Lucene format query (see lucene query syntax). 
Default search + field is label.keyword and default operator is AND, so searching for: + 'Bus Stop' Blue + is equivalent to: + Label.keyword:'Bus Stop' AND label.keyword:'Blue' + :type label: str + :param count_range: Range of times ROI appears in the frame (min, max). -1 for + not applicable. Both integers must be larger than or equal to -1. 2nd integer + (max) must be either -1 or larger than or equal to the 1st integer (min) + :type count_range: Sequence[int] + :param conf_range: Range of ROI confidence level in the frame (min, max). -1 + for not applicable Both min and max can be either -1 or positive. 2nd number + (max) must be either -1 or larger than or equal to the 1st number (min) + :type conf_range: Sequence[float] + """ + _schema = { + 'properties': { + 'conf_range': { + 'description': 'Range of ROI confidence level in the frame (min, max). -1 for not applicable\n Both min and max can be either -1 or positive.\n 2nd number (max) must be either -1 or larger than or equal to the 1st number (min)', + 'items': {'type': 'number'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'count_range': { + 'description': 'Range of times ROI appears in the frame (min, max). -1 for not applicable.\n Both integers must be larger than or equal to -1.\n 2nd integer (max) must be either -1 or larger than or equal to the 1st integer (min)', + 'items': {'type': 'integer'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'label': { + 'description': "Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'", + 'type': 'string', + }, + }, + 'required': ['label'], + 'type': 'object', + } + def __init__( + self, label, count_range=None, conf_range=None, **kwargs): + super(FilterLabelRule, self).__init__(**kwargs) + self.label = label + self.count_range = count_range + self.conf_range = conf_range + + @schema_property('label') + def label(self): + return self._property_label + + @label.setter + def label(self, value): + if value is None: + self._property_label = None + return + + self.assert_isinstance(value, "label", six.string_types) + self._property_label = value + + @schema_property('count_range') + def count_range(self): + return self._property_count_range + + @count_range.setter + def count_range(self, value): + if value is None: + self._property_count_range = None + return + + self.assert_isinstance(value, "count_range", (list, tuple)) + value = [int(v) if isinstance(v, float) and v.is_integer() else v for v in value] + + self.assert_isinstance(value, "count_range", six.integer_types, is_array=True) + self._property_count_range = value + + @schema_property('conf_range') + def conf_range(self): + return self._property_conf_range + + @conf_range.setter + def conf_range(self, value): + if value is None: + self._property_conf_range = None + return + + self.assert_isinstance(value, "conf_range", (list, tuple)) + + self.assert_isinstance(value, "conf_range", six.integer_types + (float,), is_array=True) + self._property_conf_range = value + + +class FilterRule(NonStrictDataModel): + """ + :param label_rules: List of FilterLabelRule ('AND' connection) + disabled - No filtering by ROIs. 
Select all frames, even if they don't have + ROIs (all frames) + no_rois - Select only frames without ROIs (empty frames) + label_rules - Select frames according to label rules + :type label_rules: Sequence[FilterLabelRule] + :param filter_by_roi: Type of filter + :type filter_by_roi: FilterByRoiEnum + :param frame_query: Frame filter, in Lucene query syntax + :type frame_query: str + :param sources_query: Sources filter, in Lucene query syntax. Filters sources + in each frame. + :type sources_query: str + :param dataset: Dataset ID. Must be a dataset which is in the task's view. If + set to '*' all datasets in View are used. + :type dataset: str + :param version: Dataset version to apply rule to. Must belong to the dataset + and be in the task's view. If set to '*' all version of the datasets in View + are used. + :type version: str + :param weight: Rule weight. Default is 1 + :type weight: float + """ + _schema = { + 'properties': { + 'dataset': { + 'description': "Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in View are used.", + 'type': 'string', + }, + 'filter_by_roi': { + '$ref': '#/definitions/filter_by_roi_enum', + 'description': 'Type of filter', + }, + 'frame_query': { + 'description': 'Frame filter, in Lucene query syntax', + 'type': 'string', + }, + 'label_rules': { + 'description': "List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules", + 'items': {'$ref': '#/definitions/filter_label_rule'}, + 'type': ['array', 'null'], + }, + 'sources_query': { + 'description': 'Sources filter, in Lucene query syntax. Filters sources in each frame.', + 'type': 'string', + }, + 'version': { + 'description': "Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If set to '*' all version of the datasets in View are used.", + 'type': 'string', + }, + 'weight': { + 'description': 'Rule weight. 
Default is 1', + 'type': 'number', + }, + }, + 'required': ['filter_by_roi'], + 'type': 'object', + } + def __init__( + self, filter_by_roi, label_rules=None, frame_query=None, sources_query=None, dataset=None, version=None, weight=None, **kwargs): + super(FilterRule, self).__init__(**kwargs) + self.label_rules = label_rules + self.filter_by_roi = filter_by_roi + self.frame_query = frame_query + self.sources_query = sources_query + self.dataset = dataset + self.version = version + self.weight = weight + + @schema_property('label_rules') + def label_rules(self): + return self._property_label_rules + + @label_rules.setter + def label_rules(self, value): + if value is None: + self._property_label_rules = None + return + + self.assert_isinstance(value, "label_rules", (list, tuple)) + if any(isinstance(v, dict) for v in value): + value = [FilterLabelRule.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "label_rules", FilterLabelRule, is_array=True) + self._property_label_rules = value + + @schema_property('filter_by_roi') + def filter_by_roi(self): + return self._property_filter_by_roi + + @filter_by_roi.setter + def filter_by_roi(self, value): + if value is None: + self._property_filter_by_roi = None + return + if isinstance(value, six.string_types): + try: + value = FilterByRoiEnum(value) + except ValueError: + pass + else: + self.assert_isinstance(value, "filter_by_roi", enum.Enum) + self._property_filter_by_roi = value + + @schema_property('frame_query') + def frame_query(self): + return self._property_frame_query + + @frame_query.setter + def frame_query(self, value): + if value is None: + self._property_frame_query = None + return + + self.assert_isinstance(value, "frame_query", six.string_types) + self._property_frame_query = value + + @schema_property('sources_query') + def sources_query(self): + return self._property_sources_query + + @sources_query.setter + def sources_query(self, value): + if value is None: + self._property_sources_query = None + return + + self.assert_isinstance(value, "sources_query", six.string_types) + self._property_sources_query = value + + @schema_property('dataset') + def dataset(self): + return self._property_dataset + + @dataset.setter + def dataset(self, value): + if value is None: + self._property_dataset = None + return + + self.assert_isinstance(value, "dataset", six.string_types) + self._property_dataset = value + + @schema_property('version') + def version(self): + return self._property_version + + @version.setter + def version(self, value): + if value is None: + self._property_version = None + return + + self.assert_isinstance(value, "version", six.string_types) + self._property_version = value + + @schema_property('weight') + def weight(self): + return self._property_weight + + @weight.setter + def weight(self, value): + if value is None: + self._property_weight = None + return + + self.assert_isinstance(value, "weight", six.integer_types + (float,)) + self._property_weight = value + + +class MultiFieldPatternData(NonStrictDataModel): + """ + :param pattern: Pattern string (regex) + :type pattern: str + :param fields: List of field names + :type fields: Sequence[str] + """ + _schema = { + 'properties': { + 'fields': { + 'description': 'List of field names', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'pattern': { + 'description': 'Pattern string (regex)', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, pattern=None, fields=None, 
**kwargs): + super(MultiFieldPatternData, self).__init__(**kwargs) + self.pattern = pattern + self.fields = fields + + @schema_property('pattern') + def pattern(self): + return self._property_pattern + + @pattern.setter + def pattern(self, value): + if value is None: + self._property_pattern = None + return + + self.assert_isinstance(value, "pattern", six.string_types) + self._property_pattern = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (list, tuple)) + + self.assert_isinstance(value, "fields", six.string_types, is_array=True) + self._property_fields = value + + +class Script(NonStrictDataModel): + """ + :param binary: Binary to use when running the script + :type binary: str + :param repository: Name of the repository where the script is located + :type repository: str + :param tag: Repository tag + :type tag: str + :param branch: Repository branch id If not provided and tag not provided, + default repository branch is used. + :type branch: str + :param version_num: Version (changeset) number. Optional (default is head + version) Unused if tag is provided. + :type version_num: str + :param entry_point: Path to execute within the repository + :type entry_point: str + :param working_dir: Path to the folder from which to run the script Default - + root folder of repository[f] + :type working_dir: str + :param requirements: A JSON object containing requirements strings by key + :type requirements: dict + """ + _schema = { + 'properties': { + 'binary': { + 'default': 'python', + 'description': 'Binary to use when running the script', + 'type': ['string', 'null'], + }, + 'branch': { + 'description': 'Repository branch id If not provided and tag not provided, default repository branch is used.', + 'type': ['string', 'null'], + }, + 'entry_point': { + 'description': 'Path to execute within the repository', + 'type': ['string', 'null'], + }, + 'repository': { + 'description': 'Name of the repository where the script is located', + 'type': ['string', 'null'], + }, + 'requirements': { + 'description': 'A JSON object containing requirements strings by key', + 'type': ['object', 'null'], + }, + 'tag': {'description': 'Repository tag', 'type': ['string', 'null']}, + 'version_num': { + 'description': 'Version (changeset) number. 
Optional (default is head version) Unused if tag is provided.', + 'type': ['string', 'null'], + }, + 'working_dir': { + 'description': 'Path to the folder from which to run the script Default - root folder of repository[f]', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, binary="python", repository=None, tag=None, branch=None, version_num=None, entry_point=None, working_dir=None, requirements=None, **kwargs): + super(Script, self).__init__(**kwargs) + self.binary = binary + self.repository = repository + self.tag = tag + self.branch = branch + self.version_num = version_num + self.entry_point = entry_point + self.working_dir = working_dir + self.requirements = requirements + + @schema_property('binary') + def binary(self): + return self._property_binary + + @binary.setter + def binary(self, value): + if value is None: + self._property_binary = None + return + + self.assert_isinstance(value, "binary", six.string_types) + self._property_binary = value + + @schema_property('repository') + def repository(self): + return self._property_repository + + @repository.setter + def repository(self, value): + if value is None: + self._property_repository = None + return + + self.assert_isinstance(value, "repository", six.string_types) + self._property_repository = value + + @schema_property('tag') + def tag(self): + return self._property_tag + + @tag.setter + def tag(self, value): + if value is None: + self._property_tag = None + return + + self.assert_isinstance(value, "tag", six.string_types) + self._property_tag = value + + @schema_property('branch') + def branch(self): + return self._property_branch + + @branch.setter + def branch(self, value): + if value is None: + self._property_branch = None + return + + self.assert_isinstance(value, "branch", six.string_types) + self._property_branch = value + + @schema_property('version_num') + def version_num(self): + return self._property_version_num + + @version_num.setter + def version_num(self, value): + if value is None: + self._property_version_num = None + return + + self.assert_isinstance(value, "version_num", six.string_types) + self._property_version_num = value + + @schema_property('entry_point') + def entry_point(self): + return self._property_entry_point + + @entry_point.setter + def entry_point(self, value): + if value is None: + self._property_entry_point = None + return + + self.assert_isinstance(value, "entry_point", six.string_types) + self._property_entry_point = value + + @schema_property('working_dir') + def working_dir(self): + return self._property_working_dir + + @working_dir.setter + def working_dir(self, value): + if value is None: + self._property_working_dir = None + return + + self.assert_isinstance(value, "working_dir", six.string_types) + self._property_working_dir = value + + @schema_property('requirements') + def requirements(self): + return self._property_requirements + + @requirements.setter + def requirements(self, value): + if value is None: + self._property_requirements = None + return + + self.assert_isinstance(value, "requirements", (dict,)) + self._property_requirements = value + + +class LabelSource(NonStrictDataModel): + """ + :param labels: List of source labels (AND connection). '*' indicates any label. + Labels must exist in at least one of the dataset versions in the task's view + :type labels: Sequence[str] + :param dataset: Source dataset id. '*' for all datasets in view + :type dataset: str + :param version: Source dataset version id. 
Default is '*' (for all versions in + dataset in the view) Version must belong to the selected dataset, and must be + in the task's view[i] + :type version: str + """ + _schema = { + 'properties': { + 'dataset': { + 'description': "Source dataset id. '*' for all datasets in view", + 'type': ['string', 'null'], + }, + 'labels': { + 'description': "List of source labels (AND connection). '*' indicates any label. Labels must exist in at least one of the dataset versions in the task's view", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'version': { + 'description': "Source dataset version id. Default is '*' (for all versions in dataset in the view) Version must belong to the selected dataset, and must be in the task's view[i]", + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, labels=None, dataset=None, version=None, **kwargs): + super(LabelSource, self).__init__(**kwargs) + self.labels = labels + self.dataset = dataset + self.version = version + + @schema_property('labels') + def labels(self): + return self._property_labels + + @labels.setter + def labels(self, value): + if value is None: + self._property_labels = None + return + + self.assert_isinstance(value, "labels", (list, tuple)) + + self.assert_isinstance(value, "labels", six.string_types, is_array=True) + self._property_labels = value + + @schema_property('dataset') + def dataset(self): + return self._property_dataset + + @dataset.setter + def dataset(self, value): + if value is None: + self._property_dataset = None + return + + self.assert_isinstance(value, "dataset", six.string_types) + self._property_dataset = value + + @schema_property('version') + def version(self): + return self._property_version + + @version.setter + def version(self, value): + if value is None: + self._property_version = None + return + + self.assert_isinstance(value, "version", six.string_types) + self._property_version = value + + +class MappingRule(NonStrictDataModel): + """ + :param source: Source label info + :type source: LabelSource + :param target: Target label name + :type target: str + """ + _schema = { + 'properties': { + 'source': { + 'description': 'Source label info', + 'oneOf': [{'$ref': '#/definitions/label_source'}, {'type': 'null'}], + }, + 'target': { + 'description': 'Target label name', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, source=None, target=None, **kwargs): + super(MappingRule, self).__init__(**kwargs) + self.source = source + self.target = target + + @schema_property('source') + def source(self): + return self._property_source + + @source.setter + def source(self, value): + if value is None: + self._property_source = None + return + if isinstance(value, dict): + value = LabelSource.from_dict(value) + else: + self.assert_isinstance(value, "source", LabelSource) + self._property_source = value + + @schema_property('target') + def target(self): + return self._property_target + + @target.setter + def target(self, value): + if value is None: + self._property_target = None + return + + self.assert_isinstance(value, "target", six.string_types) + self._property_target = value + + +class Mapping(NonStrictDataModel): + """ + :param rules: Rules list + :type rules: Sequence[MappingRule] + """ + _schema = { + 'properties': { + 'rules': { + 'description': 'Rules list', + 'items': {'$ref': '#/definitions/mapping_rule'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, rules=None, **kwargs): + 
super(Mapping, self).__init__(**kwargs) + self.rules = rules + + @schema_property('rules') + def rules(self): + return self._property_rules + + @rules.setter + def rules(self, value): + if value is None: + self._property_rules = None + return + + self.assert_isinstance(value, "rules", (list, tuple)) + if any(isinstance(v, dict) for v in value): + value = [MappingRule.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "rules", MappingRule, is_array=True) + self._property_rules = value + + +class Filtering(NonStrictDataModel): + """ + :param filtering_rules: List of FilterRule ('OR' connection) + :type filtering_rules: Sequence[FilterRule] + :param output_rois: 'all_in_frame' - all rois for a frame are returned + 'only_filtered' - only rois which led this frame to be selected + 'frame_per_roi' - single roi per frame. Frame can be returned multiple times + with a different roi each time. + Note: this should be used for Training tasks only + Note: frame_per_roi implies that only filtered rois will be returned + :type output_rois: OutputRoisEnum + """ + _schema = { + 'properties': { + 'filtering_rules': { + 'description': "List of FilterRule ('OR' connection)", + 'items': {'$ref': '#/definitions/filter_rule'}, + 'type': ['array', 'null'], + }, + 'output_rois': { + 'description': "'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be returned multiple times with a different roi each time.\n\nNote: this should be used for Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be returned\n ", + 'oneOf': [ + {'$ref': '#/definitions/output_rois_enum'}, + {'type': 'null'}, + ], + }, + }, + 'type': 'object', + } + def __init__( + self, filtering_rules=None, output_rois=None, **kwargs): + super(Filtering, self).__init__(**kwargs) + self.filtering_rules = filtering_rules + self.output_rois = output_rois + + @schema_property('filtering_rules') + def filtering_rules(self): + return self._property_filtering_rules + + @filtering_rules.setter + def filtering_rules(self, value): + if value is None: + self._property_filtering_rules = None + return + + self.assert_isinstance(value, "filtering_rules", (list, tuple)) + if any(isinstance(v, dict) for v in value): + value = [FilterRule.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "filtering_rules", FilterRule, is_array=True) + self._property_filtering_rules = value + + @schema_property('output_rois') + def output_rois(self): + return self._property_output_rois + + @output_rois.setter + def output_rois(self, value): + if value is None: + self._property_output_rois = None + return + if isinstance(value, six.string_types): + try: + value = OutputRoisEnum(value) + except ValueError: + pass + else: + self.assert_isinstance(value, "output_rois", enum.Enum) + self._property_output_rois = value + + +class Jump(NonStrictDataModel): + """ + :param time: Max time in milliseconds between frames + :type time: int + """ + _schema = { + 'properties': { + 'time': { + 'description': 'Max time in milliseconds between frames', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, time=None, **kwargs): + super(Jump, self).__init__(**kwargs) + self.time = time + + @schema_property('time') + def time(self): + return self._property_time + + @time.setter + def time(self, value): + if value is None: + 
self._property_time = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "time", six.integer_types) + self._property_time = value + + +class AugmentationSet(NonStrictDataModel): + """ + :param cls: Augmentation class + :type cls: str + :param types: Augmentation type + :type types: Sequence[str] + :param strength: Augmentation strength. Range [0,). + :type strength: float + :param arguments: Arguments dictionary per custom augmentation type. + :type arguments: dict + """ + _schema = { + 'properties': { + 'arguments': { + 'additionalProperties': { + 'additionalProperties': True, + 'type': 'object', + }, + 'description': 'Arguments dictionary per custom augmentation type.', + 'type': ['object', 'null'], + }, + 'cls': { + 'description': 'Augmentation class', + 'type': ['string', 'null'], + }, + 'strength': { + 'description': 'Augmentation strength. Range [0,).', + 'minimum': 0, + 'type': ['number', 'null'], + }, + 'types': { + 'description': 'Augmentation type', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, cls=None, types=None, strength=None, arguments=None, **kwargs): + super(AugmentationSet, self).__init__(**kwargs) + self.cls = cls + self.types = types + self.strength = strength + self.arguments = arguments + + @schema_property('cls') + def cls(self): + return self._property_cls + + @cls.setter + def cls(self, value): + if value is None: + self._property_cls = None + return + + self.assert_isinstance(value, "cls", six.string_types) + self._property_cls = value + + @schema_property('types') + def types(self): + return self._property_types + + @types.setter + def types(self, value): + if value is None: + self._property_types = None + return + + self.assert_isinstance(value, "types", (list, tuple)) + + self.assert_isinstance(value, "types", six.string_types, is_array=True) + self._property_types = value + + @schema_property('strength') + def strength(self): + return self._property_strength + + @strength.setter + def strength(self, value): + if value is None: + self._property_strength = None + return + + self.assert_isinstance(value, "strength", six.integer_types + (float,)) + self._property_strength = value + + @schema_property('arguments') + def arguments(self): + return self._property_arguments + + @arguments.setter + def arguments(self, value): + if value is None: + self._property_arguments = None + return + + self.assert_isinstance(value, "arguments", (dict,)) + self._property_arguments = value + + +class Augmentation(NonStrictDataModel): + """ + :param sets: List of augmentation sets + :type sets: Sequence[AugmentationSet] + :param crop_around_rois: Crop image data around all frame ROIs + :type crop_around_rois: bool + """ + _schema = { + 'properties': { + 'crop_around_rois': { + 'description': 'Crop image data around all frame ROIs', + 'type': ['boolean', 'null'], + }, + 'sets': { + 'description': 'List of augmentation sets', + 'items': {'$ref': '#/definitions/augmentation_set'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, sets=None, crop_around_rois=None, **kwargs): + super(Augmentation, self).__init__(**kwargs) + self.sets = sets + self.crop_around_rois = crop_around_rois + + @schema_property('sets') + def sets(self): + return self._property_sets + + @sets.setter + def sets(self, value): + if value is None: + self._property_sets = None + return + + self.assert_isinstance(value, "sets", (list, tuple)) 
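+        # Elements may arrive either as plain dicts (e.g. parsed JSON) or as
+        # AugmentationSet instances: dicts are converted via from_dict below,
+        # otherwise every element is type-checked as an AugmentationSet.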
+ if any(isinstance(v, dict) for v in value): + value = [AugmentationSet.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "sets", AugmentationSet, is_array=True) + self._property_sets = value + + @schema_property('crop_around_rois') + def crop_around_rois(self): + return self._property_crop_around_rois + + @crop_around_rois.setter + def crop_around_rois(self, value): + if value is None: + self._property_crop_around_rois = None + return + + self.assert_isinstance(value, "crop_around_rois", (bool,)) + self._property_crop_around_rois = value + + +class Iteration(NonStrictDataModel): + """ + Sequential Iteration API configuration + + :param order: Input frames order. Values: 'sequential', 'random' In Sequential + mode frames will be returned according to the order in which the frames were + added to the dataset. + :type order: str + :param jump: Jump entry + :type jump: Jump + :param min_sequence: Length (in ms) of video clips to return. This is used in + random order, and in sequential order only if jumping is provided and only for + video frames + :type min_sequence: int + :param infinite: Infinite iteration + :type infinite: bool + :param limit: Maximum frames per task. If not passed, frames will end when no + more matching frames are found, unless infinite is True. + :type limit: int + :param random_seed: Random seed used during iteration + :type random_seed: int + """ + _schema = { + 'description': 'Sequential Iteration API configuration', + 'properties': { + 'infinite': { + 'description': 'Infinite iteration', + 'type': ['boolean', 'null'], + }, + 'jump': { + 'description': 'Jump entry', + 'oneOf': [{'$ref': '#/definitions/jump'}, {'type': 'null'}], + }, + 'limit': { + 'description': 'Maximum frames per task. If not passed, frames will end when no more matching frames are found, unless infinite is True.', + 'type': ['integer', 'null'], + }, + 'min_sequence': { + 'description': 'Length (in ms) of video clips to return. This is used in random order, and in sequential order only if jumping is provided and only for video frames', + 'type': ['integer', 'null'], + }, + 'order': { + 'description': "\n Input frames order. 
Values: 'sequential', 'random'\n In Sequential mode frames will be returned according to the order in which the frames were added to the dataset.", + 'type': ['string', 'null'], + }, + 'random_seed': { + 'description': 'Random seed used during iteration', + 'type': 'integer', + }, + }, + 'required': ['random_seed'], + 'type': 'object', + } + def __init__( + self, random_seed, order=None, jump=None, min_sequence=None, infinite=None, limit=None, **kwargs): + super(Iteration, self).__init__(**kwargs) + self.order = order + self.jump = jump + self.min_sequence = min_sequence + self.infinite = infinite + self.limit = limit + self.random_seed = random_seed + + @schema_property('order') + def order(self): + return self._property_order + + @order.setter + def order(self, value): + if value is None: + self._property_order = None + return + + self.assert_isinstance(value, "order", six.string_types) + self._property_order = value + + @schema_property('jump') + def jump(self): + return self._property_jump + + @jump.setter + def jump(self, value): + if value is None: + self._property_jump = None + return + if isinstance(value, dict): + value = Jump.from_dict(value) + else: + self.assert_isinstance(value, "jump", Jump) + self._property_jump = value + + @schema_property('min_sequence') + def min_sequence(self): + return self._property_min_sequence + + @min_sequence.setter + def min_sequence(self, value): + if value is None: + self._property_min_sequence = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "min_sequence", six.integer_types) + self._property_min_sequence = value + + @schema_property('infinite') + def infinite(self): + return self._property_infinite + + @infinite.setter + def infinite(self, value): + if value is None: + self._property_infinite = None + return + + self.assert_isinstance(value, "infinite", (bool,)) + self._property_infinite = value + + @schema_property('limit') + def limit(self): + return self._property_limit + + @limit.setter + def limit(self, value): + if value is None: + self._property_limit = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "limit", six.integer_types) + self._property_limit = value + + @schema_property('random_seed') + def random_seed(self): + return self._property_random_seed + + @random_seed.setter + def random_seed(self, value): + if value is None: + self._property_random_seed = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "random_seed", six.integer_types) + self._property_random_seed = value + + +class ViewEntry(NonStrictDataModel): + """ + :param version: Version id of a version belonging to the dataset + :type version: str + :param dataset: Existing Dataset id + :type dataset: str + :param merge_with: Version ID to merge with + :type merge_with: str + """ + _schema = { + 'properties': { + 'dataset': { + 'description': 'Existing Dataset id', + 'type': ['string', 'null'], + }, + 'merge_with': { + 'description': 'Version ID to merge with', + 'type': ['string', 'null'], + }, + 'version': { + 'description': 'Version id of a version belonging to the dataset', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, version=None, dataset=None, merge_with=None, **kwargs): + super(ViewEntry, self).__init__(**kwargs) + self.version = version + self.dataset = dataset + self.merge_with = merge_with + + 
@schema_property('version') + def version(self): + return self._property_version + + @version.setter + def version(self, value): + if value is None: + self._property_version = None + return + + self.assert_isinstance(value, "version", six.string_types) + self._property_version = value + + @schema_property('dataset') + def dataset(self): + return self._property_dataset + + @dataset.setter + def dataset(self, value): + if value is None: + self._property_dataset = None + return + + self.assert_isinstance(value, "dataset", six.string_types) + self._property_dataset = value + + @schema_property('merge_with') + def merge_with(self): + return self._property_merge_with + + @merge_with.setter + def merge_with(self, value): + if value is None: + self._property_merge_with = None + return + + self.assert_isinstance(value, "merge_with", six.string_types) + self._property_merge_with = value + + +class View(NonStrictDataModel): + """ + :param entries: List of view entries. All tasks must have at least one view. + :type entries: Sequence[ViewEntry] + """ + _schema = { + 'properties': { + 'entries': { + 'description': 'List of view entries. All tasks must have at least one view.', + 'items': {'$ref': '#/definitions/view_entry'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, entries=None, **kwargs): + super(View, self).__init__(**kwargs) + self.entries = entries + + @schema_property('entries') + def entries(self): + return self._property_entries + + @entries.setter + def entries(self, value): + if value is None: + self._property_entries = None + return + + self.assert_isinstance(value, "entries", (list, tuple)) + if any(isinstance(v, dict) for v in value): + value = [ViewEntry.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "entries", ViewEntry, is_array=True) + self._property_entries = value + + +class Input(NonStrictDataModel): + """ + :param view: View params + :type view: View + :param frames_filter: Filtering params + :type frames_filter: Filtering + :param mapping: Mapping params (see common definitions section) + :type mapping: Mapping + :param augmentation: Augmentation parameters. Only for training and testing + tasks. + :type augmentation: Augmentation + :param iteration: Iteration parameters. Not applicable for register (import) + tasks. + :type iteration: Iteration + :param dataviews: Key to DataView ID Mapping + :type dataviews: dict + """ + _schema = { + 'properties': { + 'augmentation': { + 'description': 'Augmentation parameters. Only for training and testing tasks.', + 'oneOf': [{'$ref': '#/definitions/augmentation'}, {'type': 'null'}], + }, + 'dataviews': { + 'additionalProperties': {'type': 'string'}, + 'description': 'Key to DataView ID Mapping', + 'type': ['object', 'null'], + }, + 'frames_filter': { + 'description': 'Filtering params', + 'oneOf': [{'$ref': '#/definitions/filtering'}, {'type': 'null'}], + }, + 'iteration': { + 'description': 'Iteration parameters. 
Not applicable for register (import) tasks.', + 'oneOf': [{'$ref': '#/definitions/iteration'}, {'type': 'null'}], + }, + 'mapping': { + 'description': 'Mapping params (see common definitions section)', + 'oneOf': [{'$ref': '#/definitions/mapping'}, {'type': 'null'}], + }, + 'view': { + 'description': 'View params', + 'oneOf': [{'$ref': '#/definitions/view'}, {'type': 'null'}], + }, + }, + 'type': 'object', + } + def __init__( + self, view=None, frames_filter=None, mapping=None, augmentation=None, iteration=None, dataviews=None, **kwargs): + super(Input, self).__init__(**kwargs) + self.view = view + self.frames_filter = frames_filter + self.mapping = mapping + self.augmentation = augmentation + self.iteration = iteration + self.dataviews = dataviews + + @schema_property('view') + def view(self): + return self._property_view + + @view.setter + def view(self, value): + if value is None: + self._property_view = None + return + if isinstance(value, dict): + value = View.from_dict(value) + else: + self.assert_isinstance(value, "view", View) + self._property_view = value + + @schema_property('frames_filter') + def frames_filter(self): + return self._property_frames_filter + + @frames_filter.setter + def frames_filter(self, value): + if value is None: + self._property_frames_filter = None + return + if isinstance(value, dict): + value = Filtering.from_dict(value) + else: + self.assert_isinstance(value, "frames_filter", Filtering) + self._property_frames_filter = value + + @schema_property('mapping') + def mapping(self): + return self._property_mapping + + @mapping.setter + def mapping(self, value): + if value is None: + self._property_mapping = None + return + if isinstance(value, dict): + value = Mapping.from_dict(value) + else: + self.assert_isinstance(value, "mapping", Mapping) + self._property_mapping = value + + @schema_property('augmentation') + def augmentation(self): + return self._property_augmentation + + @augmentation.setter + def augmentation(self, value): + if value is None: + self._property_augmentation = None + return + if isinstance(value, dict): + value = Augmentation.from_dict(value) + else: + self.assert_isinstance(value, "augmentation", Augmentation) + self._property_augmentation = value + + @schema_property('iteration') + def iteration(self): + return self._property_iteration + + @iteration.setter + def iteration(self, value): + if value is None: + self._property_iteration = None + return + if isinstance(value, dict): + value = Iteration.from_dict(value) + else: + self.assert_isinstance(value, "iteration", Iteration) + self._property_iteration = value + + @schema_property('dataviews') + def dataviews(self): + return self._property_dataviews + + @dataviews.setter + def dataviews(self, value): + if value is None: + self._property_dataviews = None + return + + self.assert_isinstance(value, "dataviews", (dict,)) + self._property_dataviews = value + + +class Output(NonStrictDataModel): + """ + :param view: View params + :type view: View + :param destination: Storage id. This is where output files will be stored. + :type destination: str + :param model: Model id. + :type model: str + :param result: Task result. Values: 'success', 'failure' + :type result: str + :param error: Last error text + :type error: str + """ + _schema = { + 'properties': { + 'destination': { + 'description': 'Storage id. 
This is where output files will be stored.', + 'type': ['string', 'null'], + }, + 'error': {'description': 'Last error text', 'type': ['string', 'null']}, + 'model': {'description': 'Model id.', 'type': ['string', 'null']}, + 'result': { + 'description': "Task result. Values: 'success', 'failure'", + 'type': ['string', 'null'], + }, + 'view': { + 'description': 'View params', + 'oneOf': [{'$ref': '#/definitions/view'}, {'type': 'null'}], + }, + }, + 'type': 'object', + } + def __init__( + self, view=None, destination=None, model=None, result=None, error=None, **kwargs): + super(Output, self).__init__(**kwargs) + self.view = view + self.destination = destination + self.model = model + self.result = result + self.error = error + + @schema_property('view') + def view(self): + return self._property_view + + @view.setter + def view(self, value): + if value is None: + self._property_view = None + return + if isinstance(value, dict): + value = View.from_dict(value) + else: + self.assert_isinstance(value, "view", View) + self._property_view = value + + @schema_property('destination') + def destination(self): + return self._property_destination + + @destination.setter + def destination(self, value): + if value is None: + self._property_destination = None + return + + self.assert_isinstance(value, "destination", six.string_types) + self._property_destination = value + + @schema_property('model') + def model(self): + return self._property_model + + @model.setter + def model(self, value): + if value is None: + self._property_model = None + return + + self.assert_isinstance(value, "model", six.string_types) + self._property_model = value + + @schema_property('result') + def result(self): + return self._property_result + + @result.setter + def result(self, value): + if value is None: + self._property_result = None + return + + self.assert_isinstance(value, "result", six.string_types) + self._property_result = value + + @schema_property('error') + def error(self): + return self._property_error + + @error.setter + def error(self, value): + if value is None: + self._property_error = None + return + + self.assert_isinstance(value, "error", six.string_types) + self._property_error = value + + +class OutputRoisEnum(StringEnum): + all_in_frame = "all_in_frame" + only_filtered = "only_filtered" + frame_per_roi = "frame_per_roi" + + +class Execution(NonStrictDataModel): + """ + :param queue: Queue ID where task was queued. + :type queue: str + :param test_split: Percentage of frames to use for testing only + :type test_split: int + :param parameters: Json object containing the Task parameters + :type parameters: dict + :param model: Execution input model ID Not applicable for Register (Import) + tasks + :type model: str + :param model_desc: Json object representing the Model descriptors + :type model_desc: dict + :param model_labels: Json object representing the ids of the labels in the + model. The keys are the layers' names and the values are the IDs. Not + applicable for Register (Import) tasks. Mandatory for Training tasks[z] + :type model_labels: dict + :param framework: Framework related to the task. Case insensitive. Mandatory + for Training tasks. 
+ :type framework: str + :param dataviews: Additional dataviews for the task + :type dataviews: Sequence[dict] + """ + _schema = { + 'properties': { + 'dataviews': { + 'description': 'Additional dataviews for the task', + 'items': {'additionalProperties': True, 'type': 'object'}, + 'type': ['array', 'null'], + }, + 'framework': { + 'description': 'Framework related to the task. Case insensitive. Mandatory for Training tasks. ', + 'type': ['string', 'null'], + }, + 'model': { + 'description': 'Execution input model ID Not applicable for Register (Import) tasks', + 'type': ['string', 'null'], + }, + 'model_desc': { + 'additionalProperties': True, + 'description': 'Json object representing the Model descriptors', + 'type': ['object', 'null'], + }, + 'model_labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks[z]", + 'type': ['object', 'null'], + }, + 'parameters': { + 'additionalProperties': True, + 'description': 'Json object containing the Task parameters', + 'type': ['object', 'null'], + }, + 'queue': { + 'description': 'Queue ID where task was queued.', + 'type': ['string', 'null'], + }, + 'test_split': { + 'description': 'Percentage of frames to use for testing only', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, queue=None, test_split=None, parameters=None, model=None, model_desc=None, model_labels=None, framework=None, dataviews=None, **kwargs): + super(Execution, self).__init__(**kwargs) + self.queue = queue + self.test_split = test_split + self.parameters = parameters + self.model = model + self.model_desc = model_desc + self.model_labels = model_labels + self.framework = framework + self.dataviews = dataviews + + @schema_property('queue') + def queue(self): + return self._property_queue + + @queue.setter + def queue(self, value): + if value is None: + self._property_queue = None + return + + self.assert_isinstance(value, "queue", six.string_types) + self._property_queue = value + + @schema_property('test_split') + def test_split(self): + return self._property_test_split + + @test_split.setter + def test_split(self, value): + if value is None: + self._property_test_split = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "test_split", six.integer_types) + self._property_test_split = value + + @schema_property('parameters') + def parameters(self): + return self._property_parameters + + @parameters.setter + def parameters(self, value): + if value is None: + self._property_parameters = None + return + + self.assert_isinstance(value, "parameters", (dict,)) + self._property_parameters = value + + @schema_property('model') + def model(self): + return self._property_model + + @model.setter + def model(self, value): + if value is None: + self._property_model = None + return + + self.assert_isinstance(value, "model", six.string_types) + self._property_model = value + + @schema_property('model_desc') + def model_desc(self): + return self._property_model_desc + + @model_desc.setter + def model_desc(self, value): + if value is None: + self._property_model_desc = None + return + + self.assert_isinstance(value, "model_desc", (dict,)) + self._property_model_desc = value + + @schema_property('model_labels') + def model_labels(self): + return 
self._property_model_labels + + @model_labels.setter + def model_labels(self, value): + if value is None: + self._property_model_labels = None + return + + self.assert_isinstance(value, "model_labels", (dict,)) + self._property_model_labels = value + + @schema_property('framework') + def framework(self): + return self._property_framework + + @framework.setter + def framework(self, value): + if value is None: + self._property_framework = None + return + + self.assert_isinstance(value, "framework", six.string_types) + self._property_framework = value + + @schema_property('dataviews') + def dataviews(self): + return self._property_dataviews + + @dataviews.setter + def dataviews(self, value): + if value is None: + self._property_dataviews = None + return + + self.assert_isinstance(value, "dataviews", (list, tuple)) + + self.assert_isinstance(value, "dataviews", (dict,), is_array=True) + self._property_dataviews = value + + +class TaskStatusEnum(StringEnum): + created = "created" + queued = "queued" + in_progress = "in_progress" + stopped = "stopped" + published = "published" + publishing = "publishing" + closed = "closed" + failed = "failed" + unknown = "unknown" + + +class TaskTypeEnum(StringEnum): + training = "training" + testing = "testing" + + +class LastMetricsEvent(NonStrictDataModel): + """ + :param metric: Metric name + :type metric: str + :param variant: Variant name + :type variant: str + :param type: Event type + :type type: str + :param timestamp: Event report time (UTC) + :type timestamp: datetime.datetime + :param iter: Iteration number + :type iter: int + :param value: Value + :type value: float + """ + _schema = { + 'properties': { + 'iter': { + 'description': 'Iteration number', + 'type': ['integer', 'null'], + }, + 'metric': {'description': 'Metric name', 'type': ['string', 'null']}, + 'timestamp': { + 'description': 'Event report time (UTC)', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'type': {'description': 'Event type', 'type': ['string', 'null']}, + 'value': {'description': 'Value', 'type': ['number', 'null']}, + 'variant': {'description': 'Variant name', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, metric=None, variant=None, type=None, timestamp=None, iter=None, value=None, **kwargs): + super(LastMetricsEvent, self).__init__(**kwargs) + self.metric = metric + self.variant = variant + self.type = type + self.timestamp = timestamp + self.iter = iter + self.value = value + + @schema_property('metric') + def metric(self): + return self._property_metric + + @metric.setter + def metric(self, value): + if value is None: + self._property_metric = None + return + + self.assert_isinstance(value, "metric", six.string_types) + self._property_metric = value + + @schema_property('variant') + def variant(self): + return self._property_variant + + @variant.setter + def variant(self, value): + if value is None: + self._property_variant = None + return + + self.assert_isinstance(value, "variant", six.string_types) + self._property_variant = value + + @schema_property('type') + def type(self): + return self._property_type + + @type.setter + def type(self, value): + if value is None: + self._property_type = None + return + + self.assert_isinstance(value, "type", six.string_types) + self._property_type = value + + @schema_property('timestamp') + def timestamp(self): + return self._property_timestamp + + @timestamp.setter + def timestamp(self, value): + if value is None: + self._property_timestamp = None + return + + 
self.assert_isinstance(value, "timestamp", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_timestamp = value + + @schema_property('iter') + def iter(self): + return self._property_iter + + @iter.setter + def iter(self, value): + if value is None: + self._property_iter = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "iter", six.integer_types) + self._property_iter = value + + @schema_property('value') + def value(self): + return self._property_value + + @value.setter + def value(self, value): + if value is None: + self._property_value = None + return + + self.assert_isinstance(value, "value", six.integer_types + (float,)) + self._property_value = value + + +class LastMetricsVariants(NonStrictDataModel): + """ + Last metric events, one for each variant hash + + """ + _schema = { + 'additionalProperties': {'$ref': '#/definitions/last_metrics_event'}, + 'description': 'Last metric events, one for each variant hash', + 'type': 'object', + } + + +class Task(NonStrictDataModel): + """ + :param id: Task id + :type id: str + :param name: Task Name + :type name: str + :param user: Associated user id + :type user: str + :param company: Company ID + :type company: str + :param type: Type of task. Values: 'dataset_import', 'annotation', 'training', + 'testing' + :type type: TaskTypeEnum + :param status: + :type status: TaskStatusEnum + :param comment: Free text comment + :type comment: str + :param created: Task creation time (UTC) + :type created: datetime.datetime + :param started: Task start time (UTC) + :type started: datetime.datetime + :param completed: Task end time (UTC) + :type completed: datetime.datetime + :param parent: Parent task id + :type parent: str + :param project: Project ID of the project to which this task is assigned + :type project: str + :param input: Task input params + :type input: Input + :param output: Task output params + :type output: Output + :param execution: Task execution params + :type execution: Execution + :param script: Script info + :type script: Script + :param tags: Tags list + :type tags: Sequence[str] + :param status_changed: Last status change time + :type status_changed: datetime.datetime + :param status_message: free text string representing info about the status + :type status_message: str + :param status_reason: Reason for last status change + :type status_reason: str + :param published: Last status change time + :type published: datetime.datetime + :param last_worker: ID of last worker that handled the task + :type last_worker: str + :param last_worker_report: Last time a worker reported while working on this + task + :type last_worker_report: datetime.datetime + :param last_update: Last time this task was created, updated, changed or events + for this task were reported + :type last_update: datetime.datetime + :param last_iteration: Last iteration reported for this task + :type last_iteration: int + :param last_metrics: Last metric variants (hash to events), one for each metric + hash + :type last_metrics: dict + """ + _schema = { + 'properties': { + 'comment': { + 'description': 'Free text comment', + 'type': ['string', 'null'], + }, + 'company': {'description': 'Company ID', 'type': ['string', 'null']}, + 'completed': { + 'description': 'Task end time (UTC)', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'created': { + 'description': 'Task creation time (UTC) ', + 'format': 'date-time', + 
'type': ['string', 'null'], + }, + 'execution': { + 'description': 'Task execution params', + 'oneOf': [{'$ref': '#/definitions/execution'}, {'type': 'null'}], + }, + 'id': {'description': 'Task id', 'type': ['string', 'null']}, + 'input': { + 'description': 'Task input params', + 'oneOf': [{'$ref': '#/definitions/input'}, {'type': 'null'}], + }, + 'last_iteration': { + 'description': 'Last iteration reported for this task', + 'type': ['integer', 'null'], + }, + 'last_metrics': { + 'additionalProperties': { + '$ref': '#/definitions/last_metrics_variants', + }, + 'description': 'Last metric variants (hash to events), one for each metric hash', + 'type': ['object', 'null'], + }, + 'last_update': { + 'description': 'Last time this task was created, updated, changed or events for this task were reported', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'last_worker': { + 'description': 'ID of last worker that handled the task', + 'type': ['string', 'null'], + }, + 'last_worker_report': { + 'description': 'Last time a worker reported while working on this task', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'name': {'description': 'Task Name', 'type': ['string', 'null']}, + 'output': { + 'description': 'Task output params', + 'oneOf': [{'$ref': '#/definitions/output'}, {'type': 'null'}], + }, + 'parent': {'description': 'Parent task id', 'type': ['string', 'null']}, + 'project': { + 'description': 'Project ID of the project to which this task is assigned', + 'type': ['string', 'null'], + }, + 'published': { + 'description': 'Last status change time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'script': { + 'description': 'Script info', + 'oneOf': [{'$ref': '#/definitions/script'}, {'type': 'null'}], + }, + 'started': { + 'description': 'Task start time (UTC)', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'status': { + 'description': '', + 'oneOf': [ + {'$ref': '#/definitions/task_status_enum'}, + {'type': 'null'}, + ], + }, + 'status_changed': { + 'description': 'Last status change time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'status_message': { + 'description': 'free text string representing info about the status', + 'type': ['string', 'null'], + }, + 'status_reason': { + 'description': 'Reason for last status change', + 'type': ['string', 'null'], + }, + 'tags': { + 'description': 'Tags list', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'type': { + 'description': "Type of task. 
Values: 'dataset_import', 'annotation', 'training', 'testing'", + 'oneOf': [ + {'$ref': '#/definitions/task_type_enum'}, + {'type': 'null'}, + ], + }, + 'user': { + 'description': 'Associated user id', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, id=None, name=None, user=None, company=None, type=None, status=None, comment=None, created=None, started=None, completed=None, parent=None, project=None, input=None, output=None, execution=None, script=None, tags=None, status_changed=None, status_message=None, status_reason=None, published=None, last_worker=None, last_worker_report=None, last_update=None, last_iteration=None, last_metrics=None, **kwargs): + super(Task, self).__init__(**kwargs) + self.id = id + self.name = name + self.user = user + self.company = company + self.type = type + self.status = status + self.comment = comment + self.created = created + self.started = started + self.completed = completed + self.parent = parent + self.project = project + self.input = input + self.output = output + self.execution = execution + self.script = script + self.tags = tags + self.status_changed = status_changed + self.status_message = status_message + self.status_reason = status_reason + self.published = published + self.last_worker = last_worker + self.last_worker_report = last_worker_report + self.last_update = last_update + self.last_iteration = last_iteration + self.last_metrics = last_metrics + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('user') + def user(self): + return self._property_user + + @user.setter + def user(self, value): + if value is None: + self._property_user = None + return + + self.assert_isinstance(value, "user", six.string_types) + self._property_user = value + + @schema_property('company') + def company(self): + return self._property_company + + @company.setter + def company(self, value): + if value is None: + self._property_company = None + return + + self.assert_isinstance(value, "company", six.string_types) + self._property_company = value + + @schema_property('type') + def type(self): + return self._property_type + + @type.setter + def type(self, value): + if value is None: + self._property_type = None + return + if isinstance(value, six.string_types): + try: + value = TaskTypeEnum(value) + except ValueError: + pass + else: + self.assert_isinstance(value, "type", enum.Enum) + self._property_type = value + + @schema_property('status') + def status(self): + return self._property_status + + @status.setter + def status(self, value): + if value is None: + self._property_status = None + return + if isinstance(value, six.string_types): + try: + value = TaskStatusEnum(value) + except ValueError: + pass + else: + self.assert_isinstance(value, "status", enum.Enum) + self._property_status = value + + @schema_property('comment') + def comment(self): + return self._property_comment + + @comment.setter + def comment(self, value): + if value is None: + self._property_comment = None + return + + self.assert_isinstance(value, "comment", six.string_types) + 
self._property_comment = value + + @schema_property('created') + def created(self): + return self._property_created + + @created.setter + def created(self, value): + if value is None: + self._property_created = None + return + + self.assert_isinstance(value, "created", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_created = value + + @schema_property('started') + def started(self): + return self._property_started + + @started.setter + def started(self, value): + if value is None: + self._property_started = None + return + + self.assert_isinstance(value, "started", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_started = value + + @schema_property('completed') + def completed(self): + return self._property_completed + + @completed.setter + def completed(self, value): + if value is None: + self._property_completed = None + return + + self.assert_isinstance(value, "completed", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_completed = value + + @schema_property('parent') + def parent(self): + return self._property_parent + + @parent.setter + def parent(self, value): + if value is None: + self._property_parent = None + return + + self.assert_isinstance(value, "parent", six.string_types) + self._property_parent = value + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + @schema_property('input') + def input(self): + return self._property_input + + @input.setter + def input(self, value): + if value is None: + self._property_input = None + return + if isinstance(value, dict): + value = Input.from_dict(value) + else: + self.assert_isinstance(value, "input", Input) + self._property_input = value + + @schema_property('output') + def output(self): + return self._property_output + + @output.setter + def output(self, value): + if value is None: + self._property_output = None + return + if isinstance(value, dict): + value = Output.from_dict(value) + else: + self.assert_isinstance(value, "output", Output) + self._property_output = value + + @schema_property('execution') + def execution(self): + return self._property_execution + + @execution.setter + def execution(self, value): + if value is None: + self._property_execution = None + return + if isinstance(value, dict): + value = Execution.from_dict(value) + else: + self.assert_isinstance(value, "execution", Execution) + self._property_execution = value + + @schema_property('script') + def script(self): + return self._property_script + + @script.setter + def script(self, value): + if value is None: + self._property_script = None + return + if isinstance(value, dict): + value = Script.from_dict(value) + else: + self.assert_isinstance(value, "script", Script) + self._property_script = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('status_changed') + def status_changed(self): + return 
self._property_status_changed + + @status_changed.setter + def status_changed(self, value): + if value is None: + self._property_status_changed = None + return + + self.assert_isinstance(value, "status_changed", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_status_changed = value + + @schema_property('status_message') + def status_message(self): + return self._property_status_message + + @status_message.setter + def status_message(self, value): + if value is None: + self._property_status_message = None + return + + self.assert_isinstance(value, "status_message", six.string_types) + self._property_status_message = value + + @schema_property('status_reason') + def status_reason(self): + return self._property_status_reason + + @status_reason.setter + def status_reason(self, value): + if value is None: + self._property_status_reason = None + return + + self.assert_isinstance(value, "status_reason", six.string_types) + self._property_status_reason = value + + @schema_property('published') + def published(self): + return self._property_published + + @published.setter + def published(self, value): + if value is None: + self._property_published = None + return + + self.assert_isinstance(value, "published", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_published = value + + @schema_property('last_worker') + def last_worker(self): + return self._property_last_worker + + @last_worker.setter + def last_worker(self, value): + if value is None: + self._property_last_worker = None + return + + self.assert_isinstance(value, "last_worker", six.string_types) + self._property_last_worker = value + + @schema_property('last_worker_report') + def last_worker_report(self): + return self._property_last_worker_report + + @last_worker_report.setter + def last_worker_report(self, value): + if value is None: + self._property_last_worker_report = None + return + + self.assert_isinstance(value, "last_worker_report", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_last_worker_report = value + + @schema_property('last_update') + def last_update(self): + return self._property_last_update + + @last_update.setter + def last_update(self, value): + if value is None: + self._property_last_update = None + return + + self.assert_isinstance(value, "last_update", six.string_types + (datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_last_update = value + + @schema_property('last_iteration') + def last_iteration(self): + return self._property_last_iteration + + @last_iteration.setter + def last_iteration(self, value): + if value is None: + self._property_last_iteration = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "last_iteration", six.integer_types) + self._property_last_iteration = value + + @schema_property('last_metrics') + def last_metrics(self): + return self._property_last_metrics + + @last_metrics.setter + def last_metrics(self, value): + if value is None: + self._property_last_metrics = None + return + + self.assert_isinstance(value, "last_metrics", (dict,)) + self._property_last_metrics = value + + +class CloseRequest(Request): + """ + Indicates that task is closed + + :param force: Allows forcing state change even if transition is not supported + :type force: bool + :param task: Task 
ID + :type task: str + :param status_reason: Reason for status change + :type status_reason: str + :param status_message: Extra information regarding status change + :type status_message: str + """ + + _service = "tasks" + _action = "close" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'force': { + 'default': False, + 'description': 'Allows forcing state change even if transition is not supported', + 'type': ['boolean', 'null'], + }, + 'status_message': { + 'description': 'Extra information regarding status change', + 'type': 'string', + }, + 'status_reason': { + 'description': 'Reason for status change', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, force=False, status_reason=None, status_message=None, **kwargs): + super(CloseRequest, self).__init__(**kwargs) + self.force = force + self.task = task + self.status_reason = status_reason + self.status_message = status_message + + @schema_property('force') + def force(self): + return self._property_force + + @force.setter + def force(self, value): + if value is None: + self._property_force = None + return + + self.assert_isinstance(value, "force", (bool,)) + self._property_force = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('status_reason') + def status_reason(self): + return self._property_status_reason + + @status_reason.setter + def status_reason(self, value): + if value is None: + self._property_status_reason = None + return + + self.assert_isinstance(value, "status_reason", six.string_types) + self._property_status_reason = value + + @schema_property('status_message') + def status_message(self): + return self._property_status_message + + @status_message.setter + def status_message(self, value): + if value is None: + self._property_status_message = None + return + + self.assert_isinstance(value, "status_message", six.string_types) + self._property_status_message = value + + +class CloseResponse(Response): + """ + Response of tasks.close endpoint. 
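A minimal usage sketch for the close call, assuming a configured Session from trains.backend_api and that its send() helper returns an object wrapping the parsed CloseResponse (everything other than the request/response classes defined above is an assumption):

    from trains.backend_api import Session
    from trains.backend_api.services.v2_1 import tasks

    # Only `task` is required; force/status_reason/status_message are optional.
    req = tasks.CloseRequest(
        task='<task-id>',
        force=False,
        status_reason='run completed',
        status_message='closed from an automation script',
    )

    session = Session()           # picks up credentials/API server from the trains configuration
    result = session.send(req)    # assumption: send() posts tasks.close and parses the reply
    close_rsp = result.response   # assumption: parsed CloseResponse instance
    print(close_rsp.updated)      # 0 or 1, per the schema enum
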
+ + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "close" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, fields=None, **kwargs): + super(CloseResponse, self).__init__(**kwargs) + self.updated = updated + self.fields = fields + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class CreateRequest(Request): + """ + Create a new task + + :param name: Task name. Unique within the company. + :type name: str + :param tags: Tags list + :type tags: Sequence[str] + :param type: Type of task + :type type: TaskTypeEnum + :param comment: Free text comment + :type comment: str + :param parent: Parent task id Must be a completed task. + :type parent: str + :param project: Project ID of the project to which this task is assigned Must + exist[ab] + :type project: str + :param input: Task input params. (input view must be provided). + :type input: Input + :param output_dest: Output storage id Must be a reference to an existing + storage. + :type output_dest: str + :param execution: Task execution params + :type execution: Execution + :param script: Script info + :type script: Script + """ + + _service = "tasks" + _action = "create" + _version = "1.9" + _schema = { + 'definitions': { + 'augmentation': { + 'properties': { + 'crop_around_rois': { + 'description': 'Crop image data around all frame ROIs', + 'type': ['boolean', 'null'], + }, + 'sets': { + 'description': 'List of augmentation sets', + 'items': {'$ref': '#/definitions/augmentation_set'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'augmentation_set': { + 'properties': { + 'arguments': { + 'additionalProperties': { + 'additionalProperties': True, + 'type': 'object', + }, + 'description': 'Arguments dictionary per custom augmentation type.', + 'type': ['object', 'null'], + }, + 'cls': { + 'description': 'Augmentation class', + 'type': ['string', 'null'], + }, + 'strength': { + 'description': 'Augmentation strength. Range [0,).', + 'minimum': 0, + 'type': ['number', 'null'], + }, + 'types': { + 'description': 'Augmentation type', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'execution': { + 'properties': { + 'dataviews': { + 'description': 'Additional dataviews for the task', + 'items': {'additionalProperties': True, 'type': 'object'}, + 'type': ['array', 'null'], + }, + 'framework': { + 'description': 'Framework related to the task. Case insensitive. Mandatory for Training tasks. 
', + 'type': ['string', 'null'], + }, + 'model': { + 'description': 'Execution input model ID Not applicable for Register (Import) tasks', + 'type': ['string', 'null'], + }, + 'model_desc': { + 'additionalProperties': True, + 'description': 'Json object representing the Model descriptors', + 'type': ['object', 'null'], + }, + 'model_labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks[z]", + 'type': ['object', 'null'], + }, + 'parameters': { + 'additionalProperties': True, + 'description': 'Json object containing the Task parameters', + 'type': ['object', 'null'], + }, + 'queue': { + 'description': 'Queue ID where task was queued.', + 'type': ['string', 'null'], + }, + 'test_split': { + 'description': 'Percentage of frames to use for testing only', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + }, + 'filter_by_roi_enum': { + 'default': 'label_rules', + 'enum': ['disabled', 'no_rois', 'label_rules'], + 'type': 'string', + }, + 'filter_label_rule': { + 'properties': { + 'conf_range': { + 'description': 'Range of ROI confidence level in the frame (min, max). -1 for not applicable\n Both min and max can be either -1 or positive.\n 2nd number (max) must be either -1 or larger than or equal to the 1st number (min)', + 'items': {'type': 'number'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'count_range': { + 'description': 'Range of times ROI appears in the frame (min, max). -1 for not applicable.\n Both integers must be larger than or equal to -1.\n 2nd integer (max) must be either -1 or larger than or equal to the 1st integer (min)', + 'items': {'type': 'integer'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'label': { + 'description': "Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'", + 'type': 'string', + }, + }, + 'required': ['label'], + 'type': 'object', + }, + 'filter_rule': { + 'properties': { + 'dataset': { + 'description': "Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in View are used.", + 'type': 'string', + }, + 'filter_by_roi': { + '$ref': '#/definitions/filter_by_roi_enum', + 'description': 'Type of filter', + }, + 'frame_query': { + 'description': 'Frame filter, in Lucene query syntax', + 'type': 'string', + }, + 'label_rules': { + 'description': "List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules", + 'items': {'$ref': '#/definitions/filter_label_rule'}, + 'type': ['array', 'null'], + }, + 'sources_query': { + 'description': 'Sources filter, in Lucene query syntax. Filters sources in each frame.', + 'type': 'string', + }, + 'version': { + 'description': "Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If set to '*' all version of the datasets in View are used.", + 'type': 'string', + }, + 'weight': { + 'description': 'Rule weight. 
Default is 1', + 'type': 'number', + }, + }, + 'required': ['filter_by_roi'], + 'type': 'object', + }, + 'filtering': { + 'properties': { + 'filtering_rules': { + 'description': "List of FilterRule ('OR' connection)", + 'items': {'$ref': '#/definitions/filter_rule'}, + 'type': ['array', 'null'], + }, + 'output_rois': { + 'description': "'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be returned multiple times with a different roi each time.\n\nNote: this should be used for Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be returned\n ", + 'oneOf': [ + {'$ref': '#/definitions/output_rois_enum'}, + {'type': 'null'}, + ], + }, + }, + 'type': 'object', + }, + 'input': { + 'properties': { + 'augmentation': { + 'description': 'Augmentation parameters. Only for training and testing tasks.', + 'oneOf': [ + {'$ref': '#/definitions/augmentation'}, + {'type': 'null'}, + ], + }, + 'dataviews': { + 'additionalProperties': {'type': 'string'}, + 'description': 'Key to DataView ID Mapping', + 'type': ['object', 'null'], + }, + 'frames_filter': { + 'description': 'Filtering params', + 'oneOf': [ + {'$ref': '#/definitions/filtering'}, + {'type': 'null'}, + ], + }, + 'iteration': { + 'description': 'Iteration parameters. Not applicable for register (import) tasks.', + 'oneOf': [ + {'$ref': '#/definitions/iteration'}, + {'type': 'null'}, + ], + }, + 'mapping': { + 'description': 'Mapping params (see common definitions section)', + 'oneOf': [ + {'$ref': '#/definitions/mapping'}, + {'type': 'null'}, + ], + }, + 'view': { + 'description': 'View params', + 'oneOf': [{'$ref': '#/definitions/view'}, {'type': 'null'}], + }, + }, + 'type': 'object', + }, + 'iteration': { + 'description': 'Sequential Iteration API configuration', + 'properties': { + 'infinite': { + 'description': 'Infinite iteration', + 'type': ['boolean', 'null'], + }, + 'jump': { + 'description': 'Jump entry', + 'oneOf': [{'$ref': '#/definitions/jump'}, {'type': 'null'}], + }, + 'limit': { + 'description': 'Maximum frames per task. If not passed, frames will end when no more matching frames are found, unless infinite is True.', + 'type': ['integer', 'null'], + }, + 'min_sequence': { + 'description': 'Length (in ms) of video clips to return. This is used in random order, and in sequential order only if jumping is provided and only for video frames', + 'type': ['integer', 'null'], + }, + 'order': { + 'description': "\n Input frames order. Values: 'sequential', 'random'\n In Sequential mode frames will be returned according to the order in which the frames were added to the dataset.", + 'type': ['string', 'null'], + }, + 'random_seed': { + 'description': 'Random seed used during iteration', + 'type': 'integer', + }, + }, + 'required': ['random_seed'], + 'type': 'object', + }, + 'jump': { + 'properties': { + 'time': { + 'description': 'Max time in milliseconds between frames', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + }, + 'label_source': { + 'properties': { + 'dataset': { + 'description': "Source dataset id. '*' for all datasets in view", + 'type': ['string', 'null'], + }, + 'labels': { + 'description': "List of source labels (AND connection). '*' indicates any label. Labels must exist in at least one of the dataset versions in the task's view", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'version': { + 'description': "Source dataset version id. 
Default is '*' (for all versions in dataset in the view) Version must belong to the selected dataset, and must be in the task's view[i]", + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'mapping': { + 'properties': { + 'rules': { + 'description': 'Rules list', + 'items': {'$ref': '#/definitions/mapping_rule'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'mapping_rule': { + 'properties': { + 'source': { + 'description': 'Source label info', + 'oneOf': [ + {'$ref': '#/definitions/label_source'}, + {'type': 'null'}, + ], + }, + 'target': { + 'description': 'Target label name', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'output_rois_enum': { + 'enum': ['all_in_frame', 'only_filtered', 'frame_per_roi'], + 'type': 'string', + }, + 'script': { + 'properties': { + 'binary': { + 'default': 'python', + 'description': 'Binary to use when running the script', + 'type': ['string', 'null'], + }, + 'branch': { + 'description': 'Repository branch id If not provided and tag not provided, default repository branch is used.', + 'type': ['string', 'null'], + }, + 'entry_point': { + 'description': 'Path to execute within the repository', + 'type': ['string', 'null'], + }, + 'repository': { + 'description': 'Name of the repository where the script is located', + 'type': ['string', 'null'], + }, + 'requirements': { + 'description': 'A JSON object containing requirements strings by key', + 'type': ['object', 'null'], + }, + 'tag': { + 'description': 'Repository tag', + 'type': ['string', 'null'], + }, + 'version_num': { + 'description': 'Version (changeset) number. Optional (default is head version) Unused if tag is provided.', + 'type': ['string', 'null'], + }, + 'working_dir': { + 'description': 'Path to the folder from which to run the script Default - root folder of repository[f]', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'task_type_enum': { + 'enum': [ + 'dataset_import', + 'annotation', + 'annotation_manual', + 'training', + 'testing', + ], + 'type': 'string', + }, + 'view': { + 'properties': { + 'entries': { + 'description': 'List of view entries. All tasks must have at least one view.', + 'items': {'$ref': '#/definitions/view_entry'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'view_entry': { + 'properties': { + 'dataset': { + 'description': 'Existing Dataset id', + 'type': ['string', 'null'], + }, + 'merge_with': { + 'description': 'Version ID to merge with', + 'type': ['string', 'null'], + }, + 'version': { + 'description': 'Version id of a version belonging to the dataset', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'comment': {'description': 'Free text comment ', 'type': 'string'}, + 'execution': { + '$ref': '#/definitions/execution', + 'description': 'Task execution params', + }, + 'input': { + '$ref': '#/definitions/input', + 'description': 'Task input params. (input view must be provided).', + }, + 'name': { + 'description': 'Task name. 
Unique within the company.', + 'type': 'string', + }, + 'output_dest': { + 'description': 'Output storage id Must be a reference to an existing storage.', + 'type': 'string', + }, + 'parent': { + 'description': 'Parent task id Must be a completed task.', + 'type': 'string', + }, + 'project': { + 'description': 'Project ID of the project to which this task is assigned Must exist[ab]', + 'type': 'string', + }, + 'script': { + '$ref': '#/definitions/script', + 'description': 'Script info', + }, + 'tags': {'description': 'Tags list', 'items': {'type': 'string'}, 'type': 'array'}, + 'type': { + '$ref': '#/definitions/task_type_enum', + 'description': 'Type of task', + }, + }, + 'required': ['name', 'type'], + 'type': 'object', + } + def __init__( + self, name, type, tags=None, comment=None, parent=None, project=None, input=None, output_dest=None, execution=None, script=None, **kwargs): + super(CreateRequest, self).__init__(**kwargs) + self.name = name + self.tags = tags + self.type = type + self.comment = comment + self.parent = parent + self.project = project + self.input = input + self.output_dest = output_dest + self.execution = execution + self.script = script + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('type') + def type(self): + return self._property_type + + @type.setter + def type(self, value): + if value is None: + self._property_type = None + return + if isinstance(value, six.string_types): + try: + value = TaskTypeEnum(value) + except ValueError: + pass + else: + self.assert_isinstance(value, "type", enum.Enum) + self._property_type = value + + @schema_property('comment') + def comment(self): + return self._property_comment + + @comment.setter + def comment(self, value): + if value is None: + self._property_comment = None + return + + self.assert_isinstance(value, "comment", six.string_types) + self._property_comment = value + + @schema_property('parent') + def parent(self): + return self._property_parent + + @parent.setter + def parent(self, value): + if value is None: + self._property_parent = None + return + + self.assert_isinstance(value, "parent", six.string_types) + self._property_parent = value + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + @schema_property('input') + def input(self): + return self._property_input + + @input.setter + def input(self, value): + if value is None: + self._property_input = None + return + if isinstance(value, dict): + value = Input.from_dict(value) + else: + self.assert_isinstance(value, "input", Input) + self._property_input = value + + @schema_property('output_dest') + def output_dest(self): + return self._property_output_dest + + @output_dest.setter + def output_dest(self, value): + if value is None: + self._property_output_dest = 
None + return + + self.assert_isinstance(value, "output_dest", six.string_types) + self._property_output_dest = value + + @schema_property('execution') + def execution(self): + return self._property_execution + + @execution.setter + def execution(self, value): + if value is None: + self._property_execution = None + return + if isinstance(value, dict): + value = Execution.from_dict(value) + else: + self.assert_isinstance(value, "execution", Execution) + self._property_execution = value + + @schema_property('script') + def script(self): + return self._property_script + + @script.setter + def script(self, value): + if value is None: + self._property_script = None + return + if isinstance(value, dict): + value = Script.from_dict(value) + else: + self.assert_isinstance(value, "script", Script) + self._property_script = value + + +class CreateResponse(Response): + """ + Response of tasks.create endpoint. + + :param id: ID of the task + :type id: str + """ + _service = "tasks" + _action = "create" + _version = "1.9" + + _schema = { + 'definitions': {}, + 'properties': { + 'id': {'description': 'ID of the task', 'type': ['string', 'null']}, + }, + 'type': 'object', + } + def __init__( + self, id=None, **kwargs): + super(CreateResponse, self).__init__(**kwargs) + self.id = id + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", six.string_types) + self._property_id = value + + +class DeleteRequest(Request): + """ + Delete a task along with any information stored for it (statistics, frame updates etc.) + Unless Force flag is provided, operation will fail if task has objects associated with it - i.e. children tasks, projects or datasets. + Models that refer to the deleted task will be updated with a task ID indicating a deleted task. + + + :param move_to_trash: Move task to trash instead of deleting it. For internal + use only, tasks in the trash are not visible from the API and cannot be + restored! + :type move_to_trash: bool + :param force: If not true, call fails if the task status is 'in_progress' + :type force: bool + :param task: Task ID + :type task: str + :param status_reason: Reason for status change + :type status_reason: str + :param status_message: Extra information regarding status change + :type status_message: str + """ + + _service = "tasks" + _action = "delete" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'force': { + 'default': False, + 'description': "If not true, call fails if the task status is 'in_progress'", + 'type': ['boolean', 'null'], + }, + 'move_to_trash': { + 'default': False, + 'description': 'Move task to trash instead of deleting it. 
For internal use only, tasks in the trash are not visible from the API and cannot be restored!', + 'type': ['boolean', 'null'], + }, + 'status_message': { + 'description': 'Extra information regarding status change', + 'type': 'string', + }, + 'status_reason': { + 'description': 'Reason for status change', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, move_to_trash=False, force=False, status_reason=None, status_message=None, **kwargs): + super(DeleteRequest, self).__init__(**kwargs) + self.move_to_trash = move_to_trash + self.force = force + self.task = task + self.status_reason = status_reason + self.status_message = status_message + + @schema_property('move_to_trash') + def move_to_trash(self): + return self._property_move_to_trash + + @move_to_trash.setter + def move_to_trash(self, value): + if value is None: + self._property_move_to_trash = None + return + + self.assert_isinstance(value, "move_to_trash", (bool,)) + self._property_move_to_trash = value + + @schema_property('force') + def force(self): + return self._property_force + + @force.setter + def force(self, value): + if value is None: + self._property_force = None + return + + self.assert_isinstance(value, "force", (bool,)) + self._property_force = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('status_reason') + def status_reason(self): + return self._property_status_reason + + @status_reason.setter + def status_reason(self, value): + if value is None: + self._property_status_reason = None + return + + self.assert_isinstance(value, "status_reason", six.string_types) + self._property_status_reason = value + + @schema_property('status_message') + def status_message(self): + return self._property_status_message + + @status_message.setter + def status_message(self, value): + if value is None: + self._property_status_message = None + return + + self.assert_isinstance(value, "status_message", six.string_types) + self._property_status_message = value + + +class DeleteResponse(Response): + """ + Response of tasks.delete endpoint. 
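The delete request follows the same pattern; a short sketch of constructing it, showing the documented defaults and the setter-side type checks (the round trip itself would go through the same hedged Session.send() flow sketched above):

    from trains.backend_api.services.v2_1 import tasks

    # move_to_trash and force both default to False, matching the schema defaults.
    req = tasks.DeleteRequest(task='<task-id>')
    assert req.move_to_trash is False and req.force is False

    # Every property setter runs assert_isinstance(), so a value of the wrong
    # type (e.g. req.task = 123) is caught by validation instead of being
    # silently serialized into the API payload.
    req.force = True
    req.status_reason = 'cleaning up failed runs'
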
+ + :param deleted: Indicates whether the task was deleted + :type deleted: bool + :param updated_children: Number of child tasks whose parent property was + updated + :type updated_children: int + :param updated_models: Number of models whose task property was updated + :type updated_models: int + :param updated_versions: Number of dataset versions whose task property was + updated + :type updated_versions: int + :param frames: Response from frames.rollback + :type frames: dict + :param events: Response from events.delete_for_task + :type events: dict + """ + _service = "tasks" + _action = "delete" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'deleted': { + 'description': 'Indicates whether the task was deleted', + 'type': ['boolean', 'null'], + }, + 'events': { + 'additionalProperties': True, + 'description': 'Response from events.delete_for_task', + 'type': ['object', 'null'], + }, + 'frames': { + 'additionalProperties': True, + 'description': 'Response from frames.rollback', + 'type': ['object', 'null'], + }, + 'updated_children': { + 'description': 'Number of child tasks whose parent property was updated', + 'type': ['integer', 'null'], + }, + 'updated_models': { + 'description': 'Number of models whose task property was updated', + 'type': ['integer', 'null'], + }, + 'updated_versions': { + 'description': 'Number of dataset versions whose task property was updated', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, deleted=None, updated_children=None, updated_models=None, updated_versions=None, frames=None, events=None, **kwargs): + super(DeleteResponse, self).__init__(**kwargs) + self.deleted = deleted + self.updated_children = updated_children + self.updated_models = updated_models + self.updated_versions = updated_versions + self.frames = frames + self.events = events + + @schema_property('deleted') + def deleted(self): + return self._property_deleted + + @deleted.setter + def deleted(self, value): + if value is None: + self._property_deleted = None + return + + self.assert_isinstance(value, "deleted", (bool,)) + self._property_deleted = value + + @schema_property('updated_children') + def updated_children(self): + return self._property_updated_children + + @updated_children.setter + def updated_children(self, value): + if value is None: + self._property_updated_children = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated_children", six.integer_types) + self._property_updated_children = value + + @schema_property('updated_models') + def updated_models(self): + return self._property_updated_models + + @updated_models.setter + def updated_models(self, value): + if value is None: + self._property_updated_models = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated_models", six.integer_types) + self._property_updated_models = value + + @schema_property('updated_versions') + def updated_versions(self): + return self._property_updated_versions + + @updated_versions.setter + def updated_versions(self, value): + if value is None: + self._property_updated_versions = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated_versions", six.integer_types) + self._property_updated_versions = value + + @schema_property('frames') + def frames(self): + return self._property_frames + + 
@frames.setter + def frames(self, value): + if value is None: + self._property_frames = None + return + + self.assert_isinstance(value, "frames", (dict,)) + self._property_frames = value + + @schema_property('events') + def events(self): + return self._property_events + + @events.setter + def events(self, value): + if value is None: + self._property_events = None + return + + self.assert_isinstance(value, "events", (dict,)) + self._property_events = value + + +class DequeueRequest(Request): + """ + Remove a task from its queue. + Fails if task status is not queued. + + :param task: Task ID + :type task: str + :param status_reason: Reason for status change + :type status_reason: str + :param status_message: Extra information regarding status change + :type status_message: str + """ + + _service = "tasks" + _action = "dequeue" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'status_message': { + 'description': 'Extra information regarding status change', + 'type': 'string', + }, + 'status_reason': { + 'description': 'Reason for status change', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, status_reason=None, status_message=None, **kwargs): + super(DequeueRequest, self).__init__(**kwargs) + self.task = task + self.status_reason = status_reason + self.status_message = status_message + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('status_reason') + def status_reason(self): + return self._property_status_reason + + @status_reason.setter + def status_reason(self, value): + if value is None: + self._property_status_reason = None + return + + self.assert_isinstance(value, "status_reason", six.string_types) + self._property_status_reason = value + + @schema_property('status_message') + def status_message(self): + return self._property_status_message + + @status_message.setter + def status_message(self, value): + if value is None: + self._property_status_message = None + return + + self.assert_isinstance(value, "status_message", six.string_types) + self._property_status_message = value + + +class DequeueResponse(Response): + """ + Response of tasks.dequeue endpoint. 
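For the response side, a sketch built directly from the generated setters shown above: integer-typed fields accept whole floats (as JSON numbers often arrive) and coerce them to int. The response is constructed by hand here purely to illustrate the model; in practice it would be parsed from the server reply by the hedged send() flow above.

    from trains.backend_api.services.v2_1 import tasks

    # Whole floats are converted to int by the dequeued/updated setters.
    rsp = tasks.DequeueResponse(dequeued=1.0, updated=1.0, fields={'status': 'created'})
    assert rsp.dequeued == 1 and isinstance(rsp.dequeued, int)
    assert rsp.updated == 1 and isinstance(rsp.updated, int)
    print(rsp.fields)   # {'status': 'created'}
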
+ + :param dequeued: Number of tasks dequeued (0 or 1) + :type dequeued: int + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "dequeue" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'dequeued': { + 'description': 'Number of tasks dequeued (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, dequeued=None, updated=None, fields=None, **kwargs): + super(DequeueResponse, self).__init__(**kwargs) + self.dequeued = dequeued + self.updated = updated + self.fields = fields + + @schema_property('dequeued') + def dequeued(self): + return self._property_dequeued + + @dequeued.setter + def dequeued(self, value): + if value is None: + self._property_dequeued = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "dequeued", six.integer_types) + self._property_dequeued = value + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class EditRequest(Request): + """ + Edit task's details. + + :param task: ID of the task + :type task: str + :param force: If not true, call fails if the task status is not 'created' + :type force: bool + :param name: Task name Unique within the company. + :type name: str + :param tags: Tags list + :type tags: Sequence[str] + :param type: Type of task + :type type: TaskTypeEnum + :param comment: Free text comment + :type comment: str + :param parent: Parent task id Must be a completed task. + :type parent: str + :param project: Project ID of the project to which this task is assigned Must + exist[ab] + :type project: str + :param input: Task input params. (input view must be provided). + :type input: Input + :param output_dest: Output storage id Must be a reference to an existing + storage. 
+ :type output_dest: str + :param execution: Task execution params + :type execution: Execution + :param script: Script info + :type script: Script + """ + + _service = "tasks" + _action = "edit" + _version = "1.9" + _schema = { + 'definitions': { + 'augmentation': { + 'properties': { + 'crop_around_rois': { + 'description': 'Crop image data around all frame ROIs', + 'type': ['boolean', 'null'], + }, + 'sets': { + 'description': 'List of augmentation sets', + 'items': {'$ref': '#/definitions/augmentation_set'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'augmentation_set': { + 'properties': { + 'arguments': { + 'additionalProperties': { + 'additionalProperties': True, + 'type': 'object', + }, + 'description': 'Arguments dictionary per custom augmentation type.', + 'type': ['object', 'null'], + }, + 'cls': { + 'description': 'Augmentation class', + 'type': ['string', 'null'], + }, + 'strength': { + 'description': 'Augmentation strength. Range [0,).', + 'minimum': 0, + 'type': ['number', 'null'], + }, + 'types': { + 'description': 'Augmentation type', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'execution': { + 'properties': { + 'dataviews': { + 'description': 'Additional dataviews for the task', + 'items': {'additionalProperties': True, 'type': 'object'}, + 'type': ['array', 'null'], + }, + 'framework': { + 'description': 'Framework related to the task. Case insensitive. Mandatory for Training tasks. ', + 'type': ['string', 'null'], + }, + 'model': { + 'description': 'Execution input model ID Not applicable for Register (Import) tasks', + 'type': ['string', 'null'], + }, + 'model_desc': { + 'additionalProperties': True, + 'description': 'Json object representing the Model descriptors', + 'type': ['object', 'null'], + }, + 'model_labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks[z]", + 'type': ['object', 'null'], + }, + 'parameters': { + 'additionalProperties': True, + 'description': 'Json object containing the Task parameters', + 'type': ['object', 'null'], + }, + 'queue': { + 'description': 'Queue ID where task was queued.', + 'type': ['string', 'null'], + }, + 'test_split': { + 'description': 'Percentage of frames to use for testing only', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + }, + 'filter_by_roi_enum': { + 'default': 'label_rules', + 'enum': ['disabled', 'no_rois', 'label_rules'], + 'type': 'string', + }, + 'filter_label_rule': { + 'properties': { + 'conf_range': { + 'description': 'Range of ROI confidence level in the frame (min, max). -1 for not applicable\n Both min and max can be either -1 or positive.\n 2nd number (max) must be either -1 or larger than or equal to the 1st number (min)', + 'items': {'type': 'number'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'count_range': { + 'description': 'Range of times ROI appears in the frame (min, max). 
-1 for not applicable.\n Both integers must be larger than or equal to -1.\n 2nd integer (max) must be either -1 or larger than or equal to the 1st integer (min)', + 'items': {'type': 'integer'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'label': { + 'description': "Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'", + 'type': 'string', + }, + }, + 'required': ['label'], + 'type': 'object', + }, + 'filter_rule': { + 'properties': { + 'dataset': { + 'description': "Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in View are used.", + 'type': 'string', + }, + 'filter_by_roi': { + '$ref': '#/definitions/filter_by_roi_enum', + 'description': 'Type of filter', + }, + 'frame_query': { + 'description': 'Frame filter, in Lucene query syntax', + 'type': 'string', + }, + 'label_rules': { + 'description': "List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules", + 'items': {'$ref': '#/definitions/filter_label_rule'}, + 'type': ['array', 'null'], + }, + 'sources_query': { + 'description': 'Sources filter, in Lucene query syntax. Filters sources in each frame.', + 'type': 'string', + }, + 'version': { + 'description': "Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If set to '*' all version of the datasets in View are used.", + 'type': 'string', + }, + 'weight': { + 'description': 'Rule weight. Default is 1', + 'type': 'number', + }, + }, + 'required': ['filter_by_roi'], + 'type': 'object', + }, + 'filtering': { + 'properties': { + 'filtering_rules': { + 'description': "List of FilterRule ('OR' connection)", + 'items': {'$ref': '#/definitions/filter_rule'}, + 'type': ['array', 'null'], + }, + 'output_rois': { + 'description': "'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be returned multiple times with a different roi each time.\n\nNote: this should be used for Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be returned\n ", + 'oneOf': [ + {'$ref': '#/definitions/output_rois_enum'}, + {'type': 'null'}, + ], + }, + }, + 'type': 'object', + }, + 'input': { + 'properties': { + 'augmentation': { + 'description': 'Augmentation parameters. Only for training and testing tasks.', + 'oneOf': [ + {'$ref': '#/definitions/augmentation'}, + {'type': 'null'}, + ], + }, + 'dataviews': { + 'additionalProperties': {'type': 'string'}, + 'description': 'Key to DataView ID Mapping', + 'type': ['object', 'null'], + }, + 'frames_filter': { + 'description': 'Filtering params', + 'oneOf': [ + {'$ref': '#/definitions/filtering'}, + {'type': 'null'}, + ], + }, + 'iteration': { + 'description': 'Iteration parameters. 
Not applicable for register (import) tasks.', + 'oneOf': [ + {'$ref': '#/definitions/iteration'}, + {'type': 'null'}, + ], + }, + 'mapping': { + 'description': 'Mapping params (see common definitions section)', + 'oneOf': [ + {'$ref': '#/definitions/mapping'}, + {'type': 'null'}, + ], + }, + 'view': { + 'description': 'View params', + 'oneOf': [{'$ref': '#/definitions/view'}, {'type': 'null'}], + }, + }, + 'type': 'object', + }, + 'iteration': { + 'description': 'Sequential Iteration API configuration', + 'properties': { + 'infinite': { + 'description': 'Infinite iteration', + 'type': ['boolean', 'null'], + }, + 'jump': { + 'description': 'Jump entry', + 'oneOf': [{'$ref': '#/definitions/jump'}, {'type': 'null'}], + }, + 'limit': { + 'description': 'Maximum frames per task. If not passed, frames will end when no more matching frames are found, unless infinite is True.', + 'type': ['integer', 'null'], + }, + 'min_sequence': { + 'description': 'Length (in ms) of video clips to return. This is used in random order, and in sequential order only if jumping is provided and only for video frames', + 'type': ['integer', 'null'], + }, + 'order': { + 'description': "\n Input frames order. Values: 'sequential', 'random'\n In Sequential mode frames will be returned according to the order in which the frames were added to the dataset.", + 'type': ['string', 'null'], + }, + 'random_seed': { + 'description': 'Random seed used during iteration', + 'type': 'integer', + }, + }, + 'required': ['random_seed'], + 'type': 'object', + }, + 'jump': { + 'properties': { + 'time': { + 'description': 'Max time in milliseconds between frames', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + }, + 'label_source': { + 'properties': { + 'dataset': { + 'description': "Source dataset id. '*' for all datasets in view", + 'type': ['string', 'null'], + }, + 'labels': { + 'description': "List of source labels (AND connection). '*' indicates any label. Labels must exist in at least one of the dataset versions in the task's view", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'version': { + 'description': "Source dataset version id. 
Default is '*' (for all versions in dataset in the view) Version must belong to the selected dataset, and must be in the task's view[i]", + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'mapping': { + 'properties': { + 'rules': { + 'description': 'Rules list', + 'items': {'$ref': '#/definitions/mapping_rule'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'mapping_rule': { + 'properties': { + 'source': { + 'description': 'Source label info', + 'oneOf': [ + {'$ref': '#/definitions/label_source'}, + {'type': 'null'}, + ], + }, + 'target': { + 'description': 'Target label name', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'output_rois_enum': { + 'enum': ['all_in_frame', 'only_filtered', 'frame_per_roi'], + 'type': 'string', + }, + 'script': { + 'properties': { + 'binary': { + 'default': 'python', + 'description': 'Binary to use when running the script', + 'type': ['string', 'null'], + }, + 'branch': { + 'description': 'Repository branch id If not provided and tag not provided, default repository branch is used.', + 'type': ['string', 'null'], + }, + 'entry_point': { + 'description': 'Path to execute within the repository', + 'type': ['string', 'null'], + }, + 'repository': { + 'description': 'Name of the repository where the script is located', + 'type': ['string', 'null'], + }, + 'requirements': { + 'description': 'A JSON object containing requirements strings by key', + 'type': ['object', 'null'], + }, + 'tag': { + 'description': 'Repository tag', + 'type': ['string', 'null'], + }, + 'version_num': { + 'description': 'Version (changeset) number. Optional (default is head version) Unused if tag is provided.', + 'type': ['string', 'null'], + }, + 'working_dir': { + 'description': 'Path to the folder from which to run the script Default - root folder of repository[f]', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'task_type_enum': { + 'enum': [ + 'dataset_import', + 'annotation', + 'annotation_manual', + 'training', + 'testing', + ], + 'type': 'string', + }, + 'view': { + 'properties': { + 'entries': { + 'description': 'List of view entries. All tasks must have at least one view.', + 'items': {'$ref': '#/definitions/view_entry'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'view_entry': { + 'properties': { + 'dataset': { + 'description': 'Existing Dataset id', + 'type': ['string', 'null'], + }, + 'merge_with': { + 'description': 'Version ID to merge with', + 'type': ['string', 'null'], + }, + 'version': { + 'description': 'Version id of a version belonging to the dataset', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'comment': {'description': 'Free text comment ', 'type': 'string'}, + 'execution': { + '$ref': '#/definitions/execution', + 'description': 'Task execution params', + }, + 'force': { + 'default': False, + 'description': "If not true, call fails if the task status is not 'created'", + 'type': 'boolean', + }, + 'input': { + '$ref': '#/definitions/input', + 'description': 'Task input params. 
(input view must be provided).', + }, + 'name': { + 'description': 'Task name. Unique within the company.', + 'type': 'string', + }, + 'output_dest': { + 'description': 'Output storage id. Must be a reference to an existing storage.', + 'type': 'string', + }, + 'parent': { + 'description': 'Parent task id. Must be a completed task.', + 'type': 'string', + }, + 'project': { + 'description': 'Project ID of the project to which this task is assigned. Must exist.', + 'type': 'string', + }, + 'script': { + '$ref': '#/definitions/script', + 'description': 'Script info', + }, + 'tags': {'description': 'Tags list', 'items': {'type': 'string'}, 'type': 'array'}, + 'task': {'description': 'ID of the task', 'type': 'string'}, + 'type': { + '$ref': '#/definitions/task_type_enum', + 'description': 'Type of task', + }, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, force=False, name=None, tags=None, type=None, comment=None, parent=None, project=None, input=None, output_dest=None, execution=None, script=None, **kwargs): + super(EditRequest, self).__init__(**kwargs) + self.task = task + self.force = force + self.name = name + self.tags = tags + self.type = type + self.comment = comment + self.parent = parent + self.project = project + self.input = input + self.output_dest = output_dest + self.execution = execution + self.script = script + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('force') + def force(self): + return self._property_force + + @force.setter + def force(self, value): + if value is None: + self._property_force = None + return + + self.assert_isinstance(value, "force", (bool,)) + self._property_force = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('type') + def type(self): + return self._property_type + + @type.setter + def type(self, value): + if value is None: + self._property_type = None + return + if isinstance(value, six.string_types): + try: + value = TaskTypeEnum(value) + except ValueError: + pass + else: + self.assert_isinstance(value, "type", enum.Enum) + self._property_type = value + + @schema_property('comment') + def comment(self): + return self._property_comment + + @comment.setter + def comment(self, value): + if value is None: + self._property_comment = None + return + + self.assert_isinstance(value, "comment", six.string_types) + self._property_comment = value + + @schema_property('parent') + def parent(self): + return self._property_parent + + @parent.setter + def parent(self, value): + if value is None: + self._property_parent = None + return + + self.assert_isinstance(value, "parent", six.string_types) + self._property_parent = value + + @schema_property('project') + def project(self): + return self._property_project + + 
@project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + @schema_property('input') + def input(self): + return self._property_input + + @input.setter + def input(self, value): + if value is None: + self._property_input = None + return + if isinstance(value, dict): + value = Input.from_dict(value) + else: + self.assert_isinstance(value, "input", Input) + self._property_input = value + + @schema_property('output_dest') + def output_dest(self): + return self._property_output_dest + + @output_dest.setter + def output_dest(self, value): + if value is None: + self._property_output_dest = None + return + + self.assert_isinstance(value, "output_dest", six.string_types) + self._property_output_dest = value + + @schema_property('execution') + def execution(self): + return self._property_execution + + @execution.setter + def execution(self, value): + if value is None: + self._property_execution = None + return + if isinstance(value, dict): + value = Execution.from_dict(value) + else: + self.assert_isinstance(value, "execution", Execution) + self._property_execution = value + + @schema_property('script') + def script(self): + return self._property_script + + @script.setter + def script(self, value): + if value is None: + self._property_script = None + return + if isinstance(value, dict): + value = Script.from_dict(value) + else: + self.assert_isinstance(value, "script", Script) + self._property_script = value + + +class EditResponse(Response): + """ + Response of tasks.edit endpoint. + + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "edit" + _version = "1.9" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, fields=None, **kwargs): + super(EditResponse, self).__init__(**kwargs) + self.updated = updated + self.fields = fields + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class EnqueueRequest(Request): + """ + Adds a task into a queue. + + Fails if task state is not 'created'. + + Fails if the following parameters in the task were not filled: + + * execution.script.repository + + * execution.script.entrypoint + + + :param queue: Queue id. If not provided, task is added to the default queue. 
+ :type queue: str + :param task: Task ID + :type task: str + :param status_reason: Reason for status change + :type status_reason: str + :param status_message: Extra information regarding status change + :type status_message: str + """ + + _service = "tasks" + _action = "enqueue" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'queue': { + 'description': 'Queue id. If not provided, task is added to the default queue.', + 'type': ['string', 'null'], + }, + 'status_message': { + 'description': 'Extra information regarding status change', + 'type': 'string', + }, + 'status_reason': { + 'description': 'Reason for status change', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, queue=None, status_reason=None, status_message=None, **kwargs): + super(EnqueueRequest, self).__init__(**kwargs) + self.queue = queue + self.task = task + self.status_reason = status_reason + self.status_message = status_message + + @schema_property('queue') + def queue(self): + return self._property_queue + + @queue.setter + def queue(self, value): + if value is None: + self._property_queue = None + return + + self.assert_isinstance(value, "queue", six.string_types) + self._property_queue = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('status_reason') + def status_reason(self): + return self._property_status_reason + + @status_reason.setter + def status_reason(self, value): + if value is None: + self._property_status_reason = None + return + + self.assert_isinstance(value, "status_reason", six.string_types) + self._property_status_reason = value + + @schema_property('status_message') + def status_message(self): + return self._property_status_message + + @status_message.setter + def status_message(self, value): + if value is None: + self._property_status_message = None + return + + self.assert_isinstance(value, "status_message", six.string_types) + self._property_status_message = value + + +class EnqueueResponse(Response): + """ + Response of tasks.enqueue endpoint. 
+ + :param queued: Number of tasks queued (0 or 1) + :type queued: int + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "enqueue" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'queued': { + 'description': 'Number of tasks queued (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, queued=None, updated=None, fields=None, **kwargs): + super(EnqueueResponse, self).__init__(**kwargs) + self.queued = queued + self.updated = updated + self.fields = fields + + @schema_property('queued') + def queued(self): + return self._property_queued + + @queued.setter + def queued(self, value): + if value is None: + self._property_queued = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "queued", six.integer_types) + self._property_queued = value + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class FailedRequest(Request): + """ + Indicates that task has failed + + :param force: Allows forcing state change even if transition is not supported + :type force: bool + :param task: Task ID + :type task: str + :param status_reason: Reason for status change + :type status_reason: str + :param status_message: Extra information regarding status change + :type status_message: str + """ + + _service = "tasks" + _action = "failed" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'force': { + 'default': False, + 'description': 'Allows forcing state change even if transition is not supported', + 'type': ['boolean', 'null'], + }, + 'status_message': { + 'description': 'Extra information regarding status change', + 'type': 'string', + }, + 'status_reason': { + 'description': 'Reason for status change', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, force=False, status_reason=None, status_message=None, **kwargs): + super(FailedRequest, self).__init__(**kwargs) + self.force = force + self.task = task + self.status_reason = status_reason + self.status_message = status_message + + @schema_property('force') + def force(self): + return self._property_force + + @force.setter + def force(self, value): + if value is None: + self._property_force = None + return + + self.assert_isinstance(value, "force", (bool,)) + self._property_force = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if 
value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('status_reason') + def status_reason(self): + return self._property_status_reason + + @status_reason.setter + def status_reason(self, value): + if value is None: + self._property_status_reason = None + return + + self.assert_isinstance(value, "status_reason", six.string_types) + self._property_status_reason = value + + @schema_property('status_message') + def status_message(self): + return self._property_status_message + + @status_message.setter + def status_message(self, value): + if value is None: + self._property_status_message = None + return + + self.assert_isinstance(value, "status_message", six.string_types) + self._property_status_message = value + + +class FailedResponse(Response): + """ + Response of tasks.failed endpoint. + + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "failed" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, fields=None, **kwargs): + super(FailedResponse, self).__init__(**kwargs) + self.updated = updated + self.fields = fields + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class GetAllRequest(Request): + """ + Get all the company's tasks and all public tasks + + :param id: List of IDs to filter by + :type id: Sequence[str] + :param name: Get only tasks whose name matches this pattern (python regular + expression syntax) + :type name: str + :param user: List of user IDs used to filter results by the task's creating + user + :type user: Sequence[str] + :param project: List of project IDs + :type project: Sequence[str] + :param page: Page number, returns a specific page out of the resulting list of + tasks + :type page: int + :param page_size: Page size, specifies the number of results returned in each + page (last page may contain fewer results) + :type page_size: int + :param order_by: List of field names to order by. When search_text is used, + '@text_score' can be used as a field representing the text score of returned + documents. Use '-' prefix to specify descending order. Optional, recommended + when using page + :type order_by: Sequence[str] + :param type: List of task types. One or more of: 'import', 'annotation', + 'training' or 'testing' (case insensitive) + :type type: Sequence[str] + :param tags: List of task tags. Use '-' prefix to exclude tags + :type tags: Sequence[str] + :param status: List of task status. 
+ :type status: Sequence[TaskStatusEnum] + :param only_fields: List of task field names (nesting is supported using '.', + e.g. execution.model_labels). If provided, this list defines the query's + projection (only these fields will be returned for each result entry) + :type only_fields: Sequence[str] + :param parent: Parent ID + :type parent: str + :param status_changed: List of status changed constraint strings (utcformat, + epoch) with an optional prefix modifier (>, >=, <, <=) + :type status_changed: Sequence[str] + :param search_text: Free text search query + :type search_text: str + :param _all_: Multi-field pattern condition (all fields match pattern) + :type _all_: MultiFieldPatternData + :param _any_: Multi-field pattern condition (any field matches pattern) + :type _any_: MultiFieldPatternData + :param input.view.entries.dataset: List of input dataset IDs + :type input.view.entries.dataset: Sequence[str] + :param input.view.entries.version: List of input dataset version IDs + :type input.view.entries.version: Sequence[str] + """ + + _service = "tasks" + _action = "get_all" + _version = "1.9" + _schema = { + 'definitions': { + 'multi_field_pattern_data': { + 'properties': { + 'fields': { + 'description': 'List of field names', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'pattern': { + 'description': 'Pattern string (regex)', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'task_status_enum': { + 'enum': [ + 'created', + 'queued', + 'in_progress', + 'stopped', + 'published', + 'publishing', + 'closed', + 'failed', + 'unknown', + ], + 'type': 'string', + }, + }, + 'dependencies': {'page': ['page_size']}, + 'properties': { + '_all_': { + 'description': 'Multi-field pattern condition (all fields match pattern)', + 'oneOf': [ + {'$ref': '#/definitions/multi_field_pattern_data'}, + {'type': 'null'}, + ], + }, + '_any_': { + 'description': 'Multi-field pattern condition (any field matches pattern)', + 'oneOf': [ + {'$ref': '#/definitions/multi_field_pattern_data'}, + {'type': 'null'}, + ], + }, + 'id': { + 'description': 'List of IDs to filter by', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'input.view.entries.dataset': { + 'description': 'List of input dataset IDs', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'input.view.entries.version': { + 'description': 'List of input dataset version IDs', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'name': { + 'description': 'Get only tasks whose name matches this pattern (python regular expression syntax)', + 'type': ['string', 'null'], + }, + 'only_fields': { + 'description': "List of task field names (nesting is supported using '.', e.g. execution.model_labels). If provided, this list defines the query's projection (only these fields will be returned for each result entry)", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'order_by': { + 'description': "List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. 
Optional, recommended when using page", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'page': { + 'description': 'Page number, returns a specific page out of the resulting list of tasks', + 'minimum': 0, + 'type': ['integer', 'null'], + }, + 'page_size': { + 'description': 'Page size, specifies the number of results returned in each page (last page may contain fewer results)', + 'minimum': 1, + 'type': ['integer', 'null'], + }, + 'parent': {'description': 'Parent ID', 'type': ['string', 'null']}, + 'project': { + 'description': 'List of project IDs', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'search_text': { + 'description': 'Free text search query', + 'type': ['string', 'null'], + }, + 'status': { + 'description': 'List of task status.', + 'items': {'$ref': '#/definitions/task_status_enum'}, + 'type': ['array', 'null'], + }, + 'status_changed': { + 'description': 'List of status changed constraint strings (utcformat, epoch) with an optional prefix modifier (>, >=, <, <=)', + 'items': {'pattern': '^(>=|>|<=|<)?.*$', 'type': 'string'}, + 'type': ['array', 'null'], + }, + 'tags': { + 'description': "List of task tags. Use '-' prefix to exclude tags", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'type': { + 'description': "List of task types. One or more of: 'import', 'annotation', 'training' or 'testing' (case insensitive)", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'user': { + 'description': "List of user IDs used to filter results by the task's creating user", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, id=None, name=None, user=None, project=None, page=None, page_size=None, order_by=None, type=None, tags=None, status=None, only_fields=None, parent=None, status_changed=None, search_text=None, _all_=None, _any_=None, input__view__entries__dataset=None, input__view__entries__version=None, **kwargs): + super(GetAllRequest, self).__init__(**kwargs) + self.id = id + self.name = name + self.user = user + self.project = project + self.page = page + self.page_size = page_size + self.order_by = order_by + self.type = type + self.tags = tags + self.status = status + self.only_fields = only_fields + self.parent = parent + self.status_changed = status_changed + self.search_text = search_text + self._all_ = _all_ + self._any_ = _any_ + self.input__view__entries__dataset = input__view__entries__dataset + self.input__view__entries__version = input__view__entries__version + + @schema_property('id') + def id(self): + return self._property_id + + @id.setter + def id(self, value): + if value is None: + self._property_id = None + return + + self.assert_isinstance(value, "id", (list, tuple)) + + self.assert_isinstance(value, "id", six.string_types, is_array=True) + self._property_id = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('user') + def user(self): + return self._property_user + + @user.setter + def user(self, value): + if value is None: + self._property_user = None + return + + self.assert_isinstance(value, "user", (list, tuple)) + + self.assert_isinstance(value, "user", six.string_types, is_array=True) + self._property_user = value + + @schema_property('project') + def project(self): + return 
self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", (list, tuple)) + + self.assert_isinstance(value, "project", six.string_types, is_array=True) + self._property_project = value + + @schema_property('page') + def page(self): + return self._property_page + + @page.setter + def page(self, value): + if value is None: + self._property_page = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "page", six.integer_types) + self._property_page = value + + @schema_property('page_size') + def page_size(self): + return self._property_page_size + + @page_size.setter + def page_size(self, value): + if value is None: + self._property_page_size = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "page_size", six.integer_types) + self._property_page_size = value + + @schema_property('order_by') + def order_by(self): + return self._property_order_by + + @order_by.setter + def order_by(self, value): + if value is None: + self._property_order_by = None + return + + self.assert_isinstance(value, "order_by", (list, tuple)) + + self.assert_isinstance(value, "order_by", six.string_types, is_array=True) + self._property_order_by = value + + @schema_property('type') + def type(self): + return self._property_type + + @type.setter + def type(self, value): + if value is None: + self._property_type = None + return + + self.assert_isinstance(value, "type", (list, tuple)) + + self.assert_isinstance(value, "type", six.string_types, is_array=True) + self._property_type = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('status') + def status(self): + return self._property_status + + @status.setter + def status(self, value): + if value is None: + self._property_status = None + return + + self.assert_isinstance(value, "status", (list, tuple)) + if any(isinstance(v, six.string_types) for v in value): + value = [TaskStatusEnum(v) if isinstance(v, six.string_types) else v for v in value] + else: + self.assert_isinstance(value, "status", TaskStatusEnum, is_array=True) + self._property_status = value + + @schema_property('only_fields') + def only_fields(self): + return self._property_only_fields + + @only_fields.setter + def only_fields(self, value): + if value is None: + self._property_only_fields = None + return + + self.assert_isinstance(value, "only_fields", (list, tuple)) + + self.assert_isinstance(value, "only_fields", six.string_types, is_array=True) + self._property_only_fields = value + + @schema_property('parent') + def parent(self): + return self._property_parent + + @parent.setter + def parent(self, value): + if value is None: + self._property_parent = None + return + + self.assert_isinstance(value, "parent", six.string_types) + self._property_parent = value + + @schema_property('status_changed') + def status_changed(self): + return self._property_status_changed + + @status_changed.setter + def status_changed(self, value): + if value is None: + self._property_status_changed = None + return + + self.assert_isinstance(value, "status_changed", 
(list, tuple)) + + self.assert_isinstance(value, "status_changed", six.string_types, is_array=True) + self._property_status_changed = value + + @schema_property('search_text') + def search_text(self): + return self._property_search_text + + @search_text.setter + def search_text(self, value): + if value is None: + self._property_search_text = None + return + + self.assert_isinstance(value, "search_text", six.string_types) + self._property_search_text = value + + @schema_property('_all_') + def _all_(self): + return self._property__all_ + + @_all_.setter + def _all_(self, value): + if value is None: + self._property__all_ = None + return + if isinstance(value, dict): + value = MultiFieldPatternData.from_dict(value) + else: + self.assert_isinstance(value, "_all_", MultiFieldPatternData) + self._property__all_ = value + + @schema_property('_any_') + def _any_(self): + return self._property__any_ + + @_any_.setter + def _any_(self, value): + if value is None: + self._property__any_ = None + return + if isinstance(value, dict): + value = MultiFieldPatternData.from_dict(value) + else: + self.assert_isinstance(value, "_any_", MultiFieldPatternData) + self._property__any_ = value + + @schema_property('input.view.entries.dataset') + def input__view__entries__dataset(self): + return self._property_input__view__entries__dataset + + @input__view__entries__dataset.setter + def input__view__entries__dataset(self, value): + if value is None: + self._property_input__view__entries__dataset = None + return + + self.assert_isinstance(value, "input__view__entries__dataset", (list, tuple)) + + self.assert_isinstance(value, "input__view__entries__dataset", six.string_types, is_array=True) + self._property_input__view__entries__dataset = value + + @schema_property('input.view.entries.version') + def input__view__entries__version(self): + return self._property_input__view__entries__version + + @input__view__entries__version.setter + def input__view__entries__version(self, value): + if value is None: + self._property_input__view__entries__version = None + return + + self.assert_isinstance(value, "input__view__entries__version", (list, tuple)) + + self.assert_isinstance(value, "input__view__entries__version", six.string_types, is_array=True) + self._property_input__view__entries__version = value + + +class GetAllResponse(Response): + """ + Response of tasks.get_all endpoint. + + :param tasks: List of tasks + :type tasks: Sequence[Task] + """ + _service = "tasks" + _action = "get_all" + _version = "1.9" + + _schema = { + 'definitions': { + 'augmentation': { + 'properties': { + 'crop_around_rois': { + 'description': 'Crop image data around all frame ROIs', + 'type': ['boolean', 'null'], + }, + 'sets': { + 'description': 'List of augmentation sets', + 'items': {'$ref': '#/definitions/augmentation_set'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'augmentation_set': { + 'properties': { + 'arguments': { + 'additionalProperties': { + 'additionalProperties': True, + 'type': 'object', + }, + 'description': 'Arguments dictionary per custom augmentation type.', + 'type': ['object', 'null'], + }, + 'cls': { + 'description': 'Augmentation class', + 'type': ['string', 'null'], + }, + 'strength': { + 'description': 'Augmentation strength. 
Range [0,).', + 'minimum': 0, + 'type': ['number', 'null'], + }, + 'types': { + 'description': 'Augmentation type', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'execution': { + 'properties': { + 'dataviews': { + 'description': 'Additional dataviews for the task', + 'items': {'additionalProperties': True, 'type': 'object'}, + 'type': ['array', 'null'], + }, + 'framework': { + 'description': 'Framework related to the task. Case insensitive. Mandatory for Training tasks. ', + 'type': ['string', 'null'], + }, + 'model': { + 'description': 'Execution input model ID Not applicable for Register (Import) tasks', + 'type': ['string', 'null'], + }, + 'model_desc': { + 'additionalProperties': True, + 'description': 'Json object representing the Model descriptors', + 'type': ['object', 'null'], + }, + 'model_labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks[z]", + 'type': ['object', 'null'], + }, + 'parameters': { + 'additionalProperties': True, + 'description': 'Json object containing the Task parameters', + 'type': ['object', 'null'], + }, + 'queue': { + 'description': 'Queue ID where task was queued.', + 'type': ['string', 'null'], + }, + 'test_split': { + 'description': 'Percentage of frames to use for testing only', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + }, + 'filter_by_roi_enum': { + 'default': 'label_rules', + 'enum': ['disabled', 'no_rois', 'label_rules'], + 'type': 'string', + }, + 'filter_label_rule': { + 'properties': { + 'conf_range': { + 'description': 'Range of ROI confidence level in the frame (min, max). -1 for not applicable\n Both min and max can be either -1 or positive.\n 2nd number (max) must be either -1 or larger than or equal to the 1st number (min)', + 'items': {'type': 'number'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'count_range': { + 'description': 'Range of times ROI appears in the frame (min, max). -1 for not applicable.\n Both integers must be larger than or equal to -1.\n 2nd integer (max) must be either -1 or larger than or equal to the 1st integer (min)', + 'items': {'type': 'integer'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'label': { + 'description': "Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'", + 'type': 'string', + }, + }, + 'required': ['label'], + 'type': 'object', + }, + 'filter_rule': { + 'properties': { + 'dataset': { + 'description': "Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in View are used.", + 'type': 'string', + }, + 'filter_by_roi': { + '$ref': '#/definitions/filter_by_roi_enum', + 'description': 'Type of filter', + }, + 'frame_query': { + 'description': 'Frame filter, in Lucene query syntax', + 'type': 'string', + }, + 'label_rules': { + 'description': "List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. 
Select all frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules", + 'items': {'$ref': '#/definitions/filter_label_rule'}, + 'type': ['array', 'null'], + }, + 'sources_query': { + 'description': 'Sources filter, in Lucene query syntax. Filters sources in each frame.', + 'type': 'string', + }, + 'version': { + 'description': "Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If set to '*' all version of the datasets in View are used.", + 'type': 'string', + }, + 'weight': { + 'description': 'Rule weight. Default is 1', + 'type': 'number', + }, + }, + 'required': ['filter_by_roi'], + 'type': 'object', + }, + 'filtering': { + 'properties': { + 'filtering_rules': { + 'description': "List of FilterRule ('OR' connection)", + 'items': {'$ref': '#/definitions/filter_rule'}, + 'type': ['array', 'null'], + }, + 'output_rois': { + 'description': "'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be returned multiple times with a different roi each time.\n\nNote: this should be used for Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be returned\n ", + 'oneOf': [ + {'$ref': '#/definitions/output_rois_enum'}, + {'type': 'null'}, + ], + }, + }, + 'type': 'object', + }, + 'input': { + 'properties': { + 'augmentation': { + 'description': 'Augmentation parameters. Only for training and testing tasks.', + 'oneOf': [ + {'$ref': '#/definitions/augmentation'}, + {'type': 'null'}, + ], + }, + 'dataviews': { + 'additionalProperties': {'type': 'string'}, + 'description': 'Key to DataView ID Mapping', + 'type': ['object', 'null'], + }, + 'frames_filter': { + 'description': 'Filtering params', + 'oneOf': [ + {'$ref': '#/definitions/filtering'}, + {'type': 'null'}, + ], + }, + 'iteration': { + 'description': 'Iteration parameters. Not applicable for register (import) tasks.', + 'oneOf': [ + {'$ref': '#/definitions/iteration'}, + {'type': 'null'}, + ], + }, + 'mapping': { + 'description': 'Mapping params (see common definitions section)', + 'oneOf': [ + {'$ref': '#/definitions/mapping'}, + {'type': 'null'}, + ], + }, + 'view': { + 'description': 'View params', + 'oneOf': [{'$ref': '#/definitions/view'}, {'type': 'null'}], + }, + }, + 'type': 'object', + }, + 'iteration': { + 'description': 'Sequential Iteration API configuration', + 'properties': { + 'infinite': { + 'description': 'Infinite iteration', + 'type': ['boolean', 'null'], + }, + 'jump': { + 'description': 'Jump entry', + 'oneOf': [{'$ref': '#/definitions/jump'}, {'type': 'null'}], + }, + 'limit': { + 'description': 'Maximum frames per task. If not passed, frames will end when no more matching frames are found, unless infinite is True.', + 'type': ['integer', 'null'], + }, + 'min_sequence': { + 'description': 'Length (in ms) of video clips to return. This is used in random order, and in sequential order only if jumping is provided and only for video frames', + 'type': ['integer', 'null'], + }, + 'order': { + 'description': "\n Input frames order. 
Values: 'sequential', 'random'\n In Sequential mode frames will be returned according to the order in which the frames were added to the dataset.", + 'type': ['string', 'null'], + }, + 'random_seed': { + 'description': 'Random seed used during iteration', + 'type': 'integer', + }, + }, + 'required': ['random_seed'], + 'type': 'object', + }, + 'jump': { + 'properties': { + 'time': { + 'description': 'Max time in milliseconds between frames', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + }, + 'label_source': { + 'properties': { + 'dataset': { + 'description': "Source dataset id. '*' for all datasets in view", + 'type': ['string', 'null'], + }, + 'labels': { + 'description': "List of source labels (AND connection). '*' indicates any label. Labels must exist in at least one of the dataset versions in the task's view", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'version': { + 'description': "Source dataset version id. Default is '*' (for all versions in dataset in the view) Version must belong to the selected dataset, and must be in the task's view[i]", + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'last_metrics_event': { + 'properties': { + 'iter': { + 'description': 'Iteration number', + 'type': ['integer', 'null'], + }, + 'metric': { + 'description': 'Metric name', + 'type': ['string', 'null'], + }, + 'timestamp': { + 'description': 'Event report time (UTC)', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'type': { + 'description': 'Event type', + 'type': ['string', 'null'], + }, + 'value': {'description': 'Value', 'type': ['number', 'null']}, + 'variant': { + 'description': 'Variant name', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'last_metrics_variants': { + 'additionalProperties': { + '$ref': '#/definitions/last_metrics_event', + }, + 'description': 'Last metric events, one for each variant hash', + 'type': 'object', + }, + 'mapping': { + 'properties': { + 'rules': { + 'description': 'Rules list', + 'items': {'$ref': '#/definitions/mapping_rule'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'mapping_rule': { + 'properties': { + 'source': { + 'description': 'Source label info', + 'oneOf': [ + {'$ref': '#/definitions/label_source'}, + {'type': 'null'}, + ], + }, + 'target': { + 'description': 'Target label name', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'output': { + 'properties': { + 'destination': { + 'description': 'Storage id. This is where output files will be stored.', + 'type': ['string', 'null'], + }, + 'error': { + 'description': 'Last error text', + 'type': ['string', 'null'], + }, + 'model': { + 'description': 'Model id.', + 'type': ['string', 'null'], + }, + 'result': { + 'description': "Task result. 
Values: 'success', 'failure'", + 'type': ['string', 'null'], + }, + 'view': { + 'description': 'View params', + 'oneOf': [{'$ref': '#/definitions/view'}, {'type': 'null'}], + }, + }, + 'type': 'object', + }, + 'output_rois_enum': { + 'enum': ['all_in_frame', 'only_filtered', 'frame_per_roi'], + 'type': 'string', + }, + 'script': { + 'properties': { + 'binary': { + 'default': 'python', + 'description': 'Binary to use when running the script', + 'type': ['string', 'null'], + }, + 'branch': { + 'description': 'Repository branch id If not provided and tag not provided, default repository branch is used.', + 'type': ['string', 'null'], + }, + 'entry_point': { + 'description': 'Path to execute within the repository', + 'type': ['string', 'null'], + }, + 'repository': { + 'description': 'Name of the repository where the script is located', + 'type': ['string', 'null'], + }, + 'requirements': { + 'description': 'A JSON object containing requirements strings by key', + 'type': ['object', 'null'], + }, + 'tag': { + 'description': 'Repository tag', + 'type': ['string', 'null'], + }, + 'version_num': { + 'description': 'Version (changeset) number. Optional (default is head version) Unused if tag is provided.', + 'type': ['string', 'null'], + }, + 'working_dir': { + 'description': 'Path to the folder from which to run the script Default - root folder of repository[f]', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'task': { + 'properties': { + 'comment': { + 'description': 'Free text comment', + 'type': ['string', 'null'], + }, + 'company': { + 'description': 'Company ID', + 'type': ['string', 'null'], + }, + 'completed': { + 'description': 'Task end time (UTC)', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'created': { + 'description': 'Task creation time (UTC) ', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'execution': { + 'description': 'Task execution params', + 'oneOf': [ + {'$ref': '#/definitions/execution'}, + {'type': 'null'}, + ], + }, + 'id': {'description': 'Task id', 'type': ['string', 'null']}, + 'input': { + 'description': 'Task input params', + 'oneOf': [ + {'$ref': '#/definitions/input'}, + {'type': 'null'}, + ], + }, + 'last_iteration': { + 'description': 'Last iteration reported for this task', + 'type': ['integer', 'null'], + }, + 'last_metrics': { + 'additionalProperties': { + '$ref': '#/definitions/last_metrics_variants', + }, + 'description': 'Last metric variants (hash to events), one for each metric hash', + 'type': ['object', 'null'], + }, + 'last_update': { + 'description': 'Last time this task was created, updated, changed or events for this task were reported', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'last_worker': { + 'description': 'ID of last worker that handled the task', + 'type': ['string', 'null'], + }, + 'last_worker_report': { + 'description': 'Last time a worker reported while working on this task', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'name': { + 'description': 'Task Name', + 'type': ['string', 'null'], + }, + 'output': { + 'description': 'Task output params', + 'oneOf': [ + {'$ref': '#/definitions/output'}, + {'type': 'null'}, + ], + }, + 'parent': { + 'description': 'Parent task id', + 'type': ['string', 'null'], + }, + 'project': { + 'description': 'Project ID of the project to which this task is assigned', + 'type': ['string', 'null'], + }, + 'published': { + 'description': 'Last status change time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, 
+ 'script': { + 'description': 'Script info', + 'oneOf': [ + {'$ref': '#/definitions/script'}, + {'type': 'null'}, + ], + }, + 'started': { + 'description': 'Task start time (UTC)', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'status': { + 'description': '', + 'oneOf': [ + {'$ref': '#/definitions/task_status_enum'}, + {'type': 'null'}, + ], + }, + 'status_changed': { + 'description': 'Last status change time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'status_message': { + 'description': 'free text string representing info about the status', + 'type': ['string', 'null'], + }, + 'status_reason': { + 'description': 'Reason for last status change', + 'type': ['string', 'null'], + }, + 'tags': { + 'description': 'Tags list', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'type': { + 'description': "Type of task. Values: 'dataset_import', 'annotation', 'training', 'testing'", + 'oneOf': [ + {'$ref': '#/definitions/task_type_enum'}, + {'type': 'null'}, + ], + }, + 'user': { + 'description': 'Associated user id', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'task_status_enum': { + 'enum': [ + 'created', + 'queued', + 'in_progress', + 'stopped', + 'published', + 'publishing', + 'closed', + 'failed', + 'unknown', + ], + 'type': 'string', + }, + 'task_type_enum': { + 'enum': [ + 'dataset_import', + 'annotation', + 'annotation_manual', + 'training', + 'testing', + ], + 'type': 'string', + }, + 'view': { + 'properties': { + 'entries': { + 'description': 'List of view entries. All tasks must have at least one view.', + 'items': {'$ref': '#/definitions/view_entry'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'view_entry': { + 'properties': { + 'dataset': { + 'description': 'Existing Dataset id', + 'type': ['string', 'null'], + }, + 'merge_with': { + 'description': 'Version ID to merge with', + 'type': ['string', 'null'], + }, + 'version': { + 'description': 'Version id of a version belonging to the dataset', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'tasks': { + 'description': 'List of tasks', + 'items': {'$ref': '#/definitions/task'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, tasks=None, **kwargs): + super(GetAllResponse, self).__init__(**kwargs) + self.tasks = tasks + + @schema_property('tasks') + def tasks(self): + return self._property_tasks + + @tasks.setter + def tasks(self, value): + if value is None: + self._property_tasks = None + return + + self.assert_isinstance(value, "tasks", (list, tuple)) + if any(isinstance(v, dict) for v in value): + value = [Task.from_dict(v) if isinstance(v, dict) else v for v in value] + else: + self.assert_isinstance(value, "tasks", Task, is_array=True) + self._property_tasks = value + + +class GetByIdRequest(Request): + """ + Gets task information + + :param task: Task ID + :type task: str + """ + + _service = "tasks" + _action = "get_by_id" + _version = "1.9" + _schema = { + 'definitions': {}, + 'properties': {'task': {'description': 'Task ID', 'type': 'string'}}, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, **kwargs): + super(GetByIdRequest, self).__init__(**kwargs) + self.task = task + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task 
= value + + +class GetByIdResponse(Response): + """ + Response of tasks.get_by_id endpoint. + + :param task: Task info + :type task: Task + """ + _service = "tasks" + _action = "get_by_id" + _version = "1.9" + + _schema = { + 'definitions': { + 'augmentation': { + 'properties': { + 'crop_around_rois': { + 'description': 'Crop image data around all frame ROIs', + 'type': ['boolean', 'null'], + }, + 'sets': { + 'description': 'List of augmentation sets', + 'items': {'$ref': '#/definitions/augmentation_set'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'augmentation_set': { + 'properties': { + 'arguments': { + 'additionalProperties': { + 'additionalProperties': True, + 'type': 'object', + }, + 'description': 'Arguments dictionary per custom augmentation type.', + 'type': ['object', 'null'], + }, + 'cls': { + 'description': 'Augmentation class', + 'type': ['string', 'null'], + }, + 'strength': { + 'description': 'Augmentation strength. Range [0,).', + 'minimum': 0, + 'type': ['number', 'null'], + }, + 'types': { + 'description': 'Augmentation type', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'execution': { + 'properties': { + 'dataviews': { + 'description': 'Additional dataviews for the task', + 'items': {'additionalProperties': True, 'type': 'object'}, + 'type': ['array', 'null'], + }, + 'framework': { + 'description': 'Framework related to the task. Case insensitive. Mandatory for Training tasks. ', + 'type': ['string', 'null'], + }, + 'model': { + 'description': 'Execution input model ID Not applicable for Register (Import) tasks', + 'type': ['string', 'null'], + }, + 'model_desc': { + 'additionalProperties': True, + 'description': 'Json object representing the Model descriptors', + 'type': ['object', 'null'], + }, + 'model_labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks[z]", + 'type': ['object', 'null'], + }, + 'parameters': { + 'additionalProperties': True, + 'description': 'Json object containing the Task parameters', + 'type': ['object', 'null'], + }, + 'queue': { + 'description': 'Queue ID where task was queued.', + 'type': ['string', 'null'], + }, + 'test_split': { + 'description': 'Percentage of frames to use for testing only', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + }, + 'filter_by_roi_enum': { + 'default': 'label_rules', + 'enum': ['disabled', 'no_rois', 'label_rules'], + 'type': 'string', + }, + 'filter_label_rule': { + 'properties': { + 'conf_range': { + 'description': 'Range of ROI confidence level in the frame (min, max). -1 for not applicable\n Both min and max can be either -1 or positive.\n 2nd number (max) must be either -1 or larger than or equal to the 1st number (min)', + 'items': {'type': 'number'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'count_range': { + 'description': 'Range of times ROI appears in the frame (min, max). 
-1 for not applicable.\n Both integers must be larger than or equal to -1.\n 2nd integer (max) must be either -1 or larger than or equal to the 1st integer (min)', + 'items': {'type': 'integer'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'label': { + 'description': "Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'", + 'type': 'string', + }, + }, + 'required': ['label'], + 'type': 'object', + }, + 'filter_rule': { + 'properties': { + 'dataset': { + 'description': "Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in View are used.", + 'type': 'string', + }, + 'filter_by_roi': { + '$ref': '#/definitions/filter_by_roi_enum', + 'description': 'Type of filter', + }, + 'frame_query': { + 'description': 'Frame filter, in Lucene query syntax', + 'type': 'string', + }, + 'label_rules': { + 'description': "List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules", + 'items': {'$ref': '#/definitions/filter_label_rule'}, + 'type': ['array', 'null'], + }, + 'sources_query': { + 'description': 'Sources filter, in Lucene query syntax. Filters sources in each frame.', + 'type': 'string', + }, + 'version': { + 'description': "Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If set to '*' all version of the datasets in View are used.", + 'type': 'string', + }, + 'weight': { + 'description': 'Rule weight. Default is 1', + 'type': 'number', + }, + }, + 'required': ['filter_by_roi'], + 'type': 'object', + }, + 'filtering': { + 'properties': { + 'filtering_rules': { + 'description': "List of FilterRule ('OR' connection)", + 'items': {'$ref': '#/definitions/filter_rule'}, + 'type': ['array', 'null'], + }, + 'output_rois': { + 'description': "'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be returned multiple times with a different roi each time.\n\nNote: this should be used for Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be returned\n ", + 'oneOf': [ + {'$ref': '#/definitions/output_rois_enum'}, + {'type': 'null'}, + ], + }, + }, + 'type': 'object', + }, + 'input': { + 'properties': { + 'augmentation': { + 'description': 'Augmentation parameters. Only for training and testing tasks.', + 'oneOf': [ + {'$ref': '#/definitions/augmentation'}, + {'type': 'null'}, + ], + }, + 'dataviews': { + 'additionalProperties': {'type': 'string'}, + 'description': 'Key to DataView ID Mapping', + 'type': ['object', 'null'], + }, + 'frames_filter': { + 'description': 'Filtering params', + 'oneOf': [ + {'$ref': '#/definitions/filtering'}, + {'type': 'null'}, + ], + }, + 'iteration': { + 'description': 'Iteration parameters. 
Not applicable for register (import) tasks.', + 'oneOf': [ + {'$ref': '#/definitions/iteration'}, + {'type': 'null'}, + ], + }, + 'mapping': { + 'description': 'Mapping params (see common definitions section)', + 'oneOf': [ + {'$ref': '#/definitions/mapping'}, + {'type': 'null'}, + ], + }, + 'view': { + 'description': 'View params', + 'oneOf': [{'$ref': '#/definitions/view'}, {'type': 'null'}], + }, + }, + 'type': 'object', + }, + 'iteration': { + 'description': 'Sequential Iteration API configuration', + 'properties': { + 'infinite': { + 'description': 'Infinite iteration', + 'type': ['boolean', 'null'], + }, + 'jump': { + 'description': 'Jump entry', + 'oneOf': [{'$ref': '#/definitions/jump'}, {'type': 'null'}], + }, + 'limit': { + 'description': 'Maximum frames per task. If not passed, frames will end when no more matching frames are found, unless infinite is True.', + 'type': ['integer', 'null'], + }, + 'min_sequence': { + 'description': 'Length (in ms) of video clips to return. This is used in random order, and in sequential order only if jumping is provided and only for video frames', + 'type': ['integer', 'null'], + }, + 'order': { + 'description': "\n Input frames order. Values: 'sequential', 'random'\n In Sequential mode frames will be returned according to the order in which the frames were added to the dataset.", + 'type': ['string', 'null'], + }, + 'random_seed': { + 'description': 'Random seed used during iteration', + 'type': 'integer', + }, + }, + 'required': ['random_seed'], + 'type': 'object', + }, + 'jump': { + 'properties': { + 'time': { + 'description': 'Max time in milliseconds between frames', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + }, + 'label_source': { + 'properties': { + 'dataset': { + 'description': "Source dataset id. '*' for all datasets in view", + 'type': ['string', 'null'], + }, + 'labels': { + 'description': "List of source labels (AND connection). '*' indicates any label. Labels must exist in at least one of the dataset versions in the task's view", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'version': { + 'description': "Source dataset version id. 
Default is '*' (for all versions in dataset in the view) Version must belong to the selected dataset, and must be in the task's view[i]", + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'last_metrics_event': { + 'properties': { + 'iter': { + 'description': 'Iteration number', + 'type': ['integer', 'null'], + }, + 'metric': { + 'description': 'Metric name', + 'type': ['string', 'null'], + }, + 'timestamp': { + 'description': 'Event report time (UTC)', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'type': { + 'description': 'Event type', + 'type': ['string', 'null'], + }, + 'value': {'description': 'Value', 'type': ['number', 'null']}, + 'variant': { + 'description': 'Variant name', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'last_metrics_variants': { + 'additionalProperties': { + '$ref': '#/definitions/last_metrics_event', + }, + 'description': 'Last metric events, one for each variant hash', + 'type': 'object', + }, + 'mapping': { + 'properties': { + 'rules': { + 'description': 'Rules list', + 'items': {'$ref': '#/definitions/mapping_rule'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'mapping_rule': { + 'properties': { + 'source': { + 'description': 'Source label info', + 'oneOf': [ + {'$ref': '#/definitions/label_source'}, + {'type': 'null'}, + ], + }, + 'target': { + 'description': 'Target label name', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'output': { + 'properties': { + 'destination': { + 'description': 'Storage id. This is where output files will be stored.', + 'type': ['string', 'null'], + }, + 'error': { + 'description': 'Last error text', + 'type': ['string', 'null'], + }, + 'model': { + 'description': 'Model id.', + 'type': ['string', 'null'], + }, + 'result': { + 'description': "Task result. Values: 'success', 'failure'", + 'type': ['string', 'null'], + }, + 'view': { + 'description': 'View params', + 'oneOf': [{'$ref': '#/definitions/view'}, {'type': 'null'}], + }, + }, + 'type': 'object', + }, + 'output_rois_enum': { + 'enum': ['all_in_frame', 'only_filtered', 'frame_per_roi'], + 'type': 'string', + }, + 'script': { + 'properties': { + 'binary': { + 'default': 'python', + 'description': 'Binary to use when running the script', + 'type': ['string', 'null'], + }, + 'branch': { + 'description': 'Repository branch id If not provided and tag not provided, default repository branch is used.', + 'type': ['string', 'null'], + }, + 'entry_point': { + 'description': 'Path to execute within the repository', + 'type': ['string', 'null'], + }, + 'repository': { + 'description': 'Name of the repository where the script is located', + 'type': ['string', 'null'], + }, + 'requirements': { + 'description': 'A JSON object containing requirements strings by key', + 'type': ['object', 'null'], + }, + 'tag': { + 'description': 'Repository tag', + 'type': ['string', 'null'], + }, + 'version_num': { + 'description': 'Version (changeset) number. 
Optional (default is head version) Unused if tag is provided.', + 'type': ['string', 'null'], + }, + 'working_dir': { + 'description': 'Path to the folder from which to run the script Default - root folder of repository[f]', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'task': { + 'properties': { + 'comment': { + 'description': 'Free text comment', + 'type': ['string', 'null'], + }, + 'company': { + 'description': 'Company ID', + 'type': ['string', 'null'], + }, + 'completed': { + 'description': 'Task end time (UTC)', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'created': { + 'description': 'Task creation time (UTC) ', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'execution': { + 'description': 'Task execution params', + 'oneOf': [ + {'$ref': '#/definitions/execution'}, + {'type': 'null'}, + ], + }, + 'id': {'description': 'Task id', 'type': ['string', 'null']}, + 'input': { + 'description': 'Task input params', + 'oneOf': [ + {'$ref': '#/definitions/input'}, + {'type': 'null'}, + ], + }, + 'last_iteration': { + 'description': 'Last iteration reported for this task', + 'type': ['integer', 'null'], + }, + 'last_metrics': { + 'additionalProperties': { + '$ref': '#/definitions/last_metrics_variants', + }, + 'description': 'Last metric variants (hash to events), one for each metric hash', + 'type': ['object', 'null'], + }, + 'last_update': { + 'description': 'Last time this task was created, updated, changed or events for this task were reported', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'last_worker': { + 'description': 'ID of last worker that handled the task', + 'type': ['string', 'null'], + }, + 'last_worker_report': { + 'description': 'Last time a worker reported while working on this task', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'name': { + 'description': 'Task Name', + 'type': ['string', 'null'], + }, + 'output': { + 'description': 'Task output params', + 'oneOf': [ + {'$ref': '#/definitions/output'}, + {'type': 'null'}, + ], + }, + 'parent': { + 'description': 'Parent task id', + 'type': ['string', 'null'], + }, + 'project': { + 'description': 'Project ID of the project to which this task is assigned', + 'type': ['string', 'null'], + }, + 'published': { + 'description': 'Last status change time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'script': { + 'description': 'Script info', + 'oneOf': [ + {'$ref': '#/definitions/script'}, + {'type': 'null'}, + ], + }, + 'started': { + 'description': 'Task start time (UTC)', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'status': { + 'description': '', + 'oneOf': [ + {'$ref': '#/definitions/task_status_enum'}, + {'type': 'null'}, + ], + }, + 'status_changed': { + 'description': 'Last status change time', + 'format': 'date-time', + 'type': ['string', 'null'], + }, + 'status_message': { + 'description': 'free text string representing info about the status', + 'type': ['string', 'null'], + }, + 'status_reason': { + 'description': 'Reason for last status change', + 'type': ['string', 'null'], + }, + 'tags': { + 'description': 'Tags list', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'type': { + 'description': "Type of task. 
Values: 'dataset_import', 'annotation', 'training', 'testing'", + 'oneOf': [ + {'$ref': '#/definitions/task_type_enum'}, + {'type': 'null'}, + ], + }, + 'user': { + 'description': 'Associated user id', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'task_status_enum': { + 'enum': [ + 'created', + 'queued', + 'in_progress', + 'stopped', + 'published', + 'publishing', + 'closed', + 'failed', + 'unknown', + ], + 'type': 'string', + }, + 'task_type_enum': { + 'enum': [ + 'dataset_import', + 'annotation', + 'annotation_manual', + 'training', + 'testing', + ], + 'type': 'string', + }, + 'view': { + 'properties': { + 'entries': { + 'description': 'List of view entries. All tasks must have at least one view.', + 'items': {'$ref': '#/definitions/view_entry'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'view_entry': { + 'properties': { + 'dataset': { + 'description': 'Existing Dataset id', + 'type': ['string', 'null'], + }, + 'merge_with': { + 'description': 'Version ID to merge with', + 'type': ['string', 'null'], + }, + 'version': { + 'description': 'Version id of a version belonging to the dataset', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'task': { + 'description': 'Task info', + 'oneOf': [{'$ref': '#/definitions/task'}, {'type': 'null'}], + }, + }, + 'type': 'object', + } + def __init__( + self, task=None, **kwargs): + super(GetByIdResponse, self).__init__(**kwargs) + self.task = task + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + if isinstance(value, dict): + value = Task.from_dict(value) + else: + self.assert_isinstance(value, "task", Task) + self._property_task = value + + +class PublishRequest(Request): + """ + Mark a task status as published. + + For Annotation tasks - if any changes were committed by this task, a new version in the dataset together with an output view are created. + + For Training tasks - if a model was created, it should be set to ready. + + :param force: If not true, call fails if the task status is not 'stopped' + :type force: bool + :param publish_model: Indicates that the task output model (if exists) should + be published. Optional, the default value is True. + :type publish_model: bool + :param task: Task ID + :type task: str + :param status_reason: Reason for status change + :type status_reason: str + :param status_message: Extra information regarding status change + :type status_message: str + """ + + _service = "tasks" + _action = "publish" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'force': { + 'default': False, + 'description': "If not true, call fails if the task status is not 'stopped'", + 'type': ['boolean', 'null'], + }, + 'publish_model': { + 'description': 'Indicates that the task output model (if exists) should be published. 
Optional, the default value is True.', + 'type': ['boolean', 'null'], + }, + 'status_message': { + 'description': 'Extra information regarding status change', + 'type': 'string', + }, + 'status_reason': { + 'description': 'Reason for status change', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, force=False, publish_model=None, status_reason=None, status_message=None, **kwargs): + super(PublishRequest, self).__init__(**kwargs) + self.force = force + self.publish_model = publish_model + self.task = task + self.status_reason = status_reason + self.status_message = status_message + + @schema_property('force') + def force(self): + return self._property_force + + @force.setter + def force(self, value): + if value is None: + self._property_force = None + return + + self.assert_isinstance(value, "force", (bool,)) + self._property_force = value + + @schema_property('publish_model') + def publish_model(self): + return self._property_publish_model + + @publish_model.setter + def publish_model(self, value): + if value is None: + self._property_publish_model = None + return + + self.assert_isinstance(value, "publish_model", (bool,)) + self._property_publish_model = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('status_reason') + def status_reason(self): + return self._property_status_reason + + @status_reason.setter + def status_reason(self, value): + if value is None: + self._property_status_reason = None + return + + self.assert_isinstance(value, "status_reason", six.string_types) + self._property_status_reason = value + + @schema_property('status_message') + def status_message(self): + return self._property_status_message + + @status_message.setter + def status_message(self, value): + if value is None: + self._property_status_message = None + return + + self.assert_isinstance(value, "status_message", six.string_types) + self._property_status_message = value + + +class PublishResponse(Response): + """ + Response of tasks.publish endpoint. 
+ + :param committed_versions_results: Committed versions results + :type committed_versions_results: Sequence[dict] + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "publish" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'committed_versions_results': { + 'description': 'Committed versions results', + 'items': {'additionalProperties': True, 'type': 'object'}, + 'type': ['array', 'null'], + }, + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, committed_versions_results=None, updated=None, fields=None, **kwargs): + super(PublishResponse, self).__init__(**kwargs) + self.committed_versions_results = committed_versions_results + self.updated = updated + self.fields = fields + + @schema_property('committed_versions_results') + def committed_versions_results(self): + return self._property_committed_versions_results + + @committed_versions_results.setter + def committed_versions_results(self, value): + if value is None: + self._property_committed_versions_results = None + return + + self.assert_isinstance(value, "committed_versions_results", (list, tuple)) + + self.assert_isinstance(value, "committed_versions_results", (dict,), is_array=True) + self._property_committed_versions_results = value + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class ResetRequest(Request): + """ + Reset a task to its initial state, along with any information stored for it (statistics, frame updates etc.). 
+ + :param force: If not true, call fails if the task status is 'completed' + :type force: bool + :param task: Task ID + :type task: str + :param status_reason: Reason for status change + :type status_reason: str + :param status_message: Extra information regarding status change + :type status_message: str + """ + + _service = "tasks" + _action = "reset" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'force': { + 'default': False, + 'description': "If not true, call fails if the task status is 'completed'", + 'type': ['boolean', 'null'], + }, + 'status_message': { + 'description': 'Extra information regarding status change', + 'type': 'string', + }, + 'status_reason': { + 'description': 'Reason for status change', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, force=False, status_reason=None, status_message=None, **kwargs): + super(ResetRequest, self).__init__(**kwargs) + self.force = force + self.task = task + self.status_reason = status_reason + self.status_message = status_message + + @schema_property('force') + def force(self): + return self._property_force + + @force.setter + def force(self, value): + if value is None: + self._property_force = None + return + + self.assert_isinstance(value, "force", (bool,)) + self._property_force = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('status_reason') + def status_reason(self): + return self._property_status_reason + + @status_reason.setter + def status_reason(self, value): + if value is None: + self._property_status_reason = None + return + + self.assert_isinstance(value, "status_reason", six.string_types) + self._property_status_reason = value + + @schema_property('status_message') + def status_message(self): + return self._property_status_message + + @status_message.setter + def status_message(self, value): + if value is None: + self._property_status_message = None + return + + self.assert_isinstance(value, "status_message", six.string_types) + self._property_status_message = value + + +class ResetResponse(Response): + """ + Response of tasks.reset endpoint. 
+ + :param deleted_indices: List of deleted ES indices that were removed as part of + the reset process + :type deleted_indices: Sequence[str] + :param dequeued: Response from queues.remove_task + :type dequeued: dict + :param frames: Response from frames.rollback + :type frames: dict + :param events: Response from events.delete_for_task + :type events: dict + :param deleted_models: Number of output models deleted by the reset + :type deleted_models: int + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "reset" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'deleted_indices': { + 'description': 'List of deleted ES indices that were removed as part of the reset process', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'deleted_models': { + 'description': 'Number of output models deleted by the reset', + 'type': ['integer', 'null'], + }, + 'dequeued': { + 'additionalProperties': True, + 'description': 'Response from queues.remove_task', + 'type': ['object', 'null'], + }, + 'events': { + 'additionalProperties': True, + 'description': 'Response from events.delete_for_task', + 'type': ['object', 'null'], + }, + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'frames': { + 'additionalProperties': True, + 'description': 'Response from frames.rollback', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, deleted_indices=None, dequeued=None, frames=None, events=None, deleted_models=None, updated=None, fields=None, **kwargs): + super(ResetResponse, self).__init__(**kwargs) + self.deleted_indices = deleted_indices + self.dequeued = dequeued + self.frames = frames + self.events = events + self.deleted_models = deleted_models + self.updated = updated + self.fields = fields + + @schema_property('deleted_indices') + def deleted_indices(self): + return self._property_deleted_indices + + @deleted_indices.setter + def deleted_indices(self, value): + if value is None: + self._property_deleted_indices = None + return + + self.assert_isinstance(value, "deleted_indices", (list, tuple)) + + self.assert_isinstance(value, "deleted_indices", six.string_types, is_array=True) + self._property_deleted_indices = value + + @schema_property('dequeued') + def dequeued(self): + return self._property_dequeued + + @dequeued.setter + def dequeued(self, value): + if value is None: + self._property_dequeued = None + return + + self.assert_isinstance(value, "dequeued", (dict,)) + self._property_dequeued = value + + @schema_property('frames') + def frames(self): + return self._property_frames + + @frames.setter + def frames(self, value): + if value is None: + self._property_frames = None + return + + self.assert_isinstance(value, "frames", (dict,)) + self._property_frames = value + + @schema_property('events') + def events(self): + return self._property_events + + @events.setter + def events(self, value): + if value is None: + self._property_events = None + return + + self.assert_isinstance(value, "events", (dict,)) + self._property_events = value + + @schema_property('deleted_models') + def deleted_models(self): + return self._property_deleted_models + + @deleted_models.setter + def deleted_models(self, value): + 
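+ # Editorial note: the numeric setters below first coerce whole-number floats to int
+ # (JSON numbers may deserialize as floats) so that the six.integer_types check accepts them.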
if value is None: + self._property_deleted_models = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "deleted_models", six.integer_types) + self._property_deleted_models = value + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class SetRequirementsRequest(Request): + """ + Set the script requirements for a task + + :param task: Task ID + :type task: str + :param requirements: A JSON object containing requirements strings by key + :type requirements: dict + """ + + _service = "tasks" + _action = "set_requirements" + _version = "1.6" + _schema = { + 'definitions': {}, + 'properties': { + 'requirements': { + 'description': 'A JSON object containing requirements strings by key', + 'type': 'object', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task', 'requirements'], + 'type': 'object', + } + def __init__( + self, task, requirements, **kwargs): + super(SetRequirementsRequest, self).__init__(**kwargs) + self.task = task + self.requirements = requirements + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('requirements') + def requirements(self): + return self._property_requirements + + @requirements.setter + def requirements(self, value): + if value is None: + self._property_requirements = None + return + + self.assert_isinstance(value, "requirements", (dict,)) + self._property_requirements = value + + +class SetRequirementsResponse(Response): + """ + Response of tasks.set_requirements endpoint. 
+ + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "set_requirements" + _version = "1.6" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, fields=None, **kwargs): + super(SetRequirementsResponse, self).__init__(**kwargs) + self.updated = updated + self.fields = fields + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class StartedRequest(Request): + """ + Mark a task status as in_progress. Optionally allows to set the task's execution progress. + + :param force: If not true, call fails if the task status is not 'not_started' + :type force: bool + :param task: Task ID + :type task: str + :param status_reason: Reason for status change + :type status_reason: str + :param status_message: Extra information regarding status change + :type status_message: str + """ + + _service = "tasks" + _action = "started" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'force': { + 'default': False, + 'description': "If not true, call fails if the task status is not 'not_started'", + 'type': ['boolean', 'null'], + }, + 'status_message': { + 'description': 'Extra information regarding status change', + 'type': 'string', + }, + 'status_reason': { + 'description': 'Reason for status change', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, force=False, status_reason=None, status_message=None, **kwargs): + super(StartedRequest, self).__init__(**kwargs) + self.force = force + self.task = task + self.status_reason = status_reason + self.status_message = status_message + + @schema_property('force') + def force(self): + return self._property_force + + @force.setter + def force(self, value): + if value is None: + self._property_force = None + return + + self.assert_isinstance(value, "force", (bool,)) + self._property_force = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('status_reason') + def status_reason(self): + return self._property_status_reason + + @status_reason.setter + def status_reason(self, value): + if value is None: + self._property_status_reason = None + return + + self.assert_isinstance(value, "status_reason", six.string_types) + self._property_status_reason = value + + 
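+ # Illustrative usage (editorial sketch, not part of the generated code): assuming a
+ # configured Session from trains.backend_api.session, a client could mark a task as
+ # started roughly like this, reusing send_request() and CallResult from callresult.py:
+ #   session = Session()
+ #   res = session.send_request(service='tasks', action='started', json={'task': '<task-id>'})
+ #   result = CallResult.from_result(res, request_cls=StartedRequest)
+ #   assert result.ok()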
@schema_property('status_message') + def status_message(self): + return self._property_status_message + + @status_message.setter + def status_message(self, value): + if value is None: + self._property_status_message = None + return + + self.assert_isinstance(value, "status_message", six.string_types) + self._property_status_message = value + + +class StartedResponse(Response): + """ + Response of tasks.started endpoint. + + :param started: Number of tasks started (0 or 1) + :type started: int + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "started" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'started': { + 'description': 'Number of tasks started (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, started=None, updated=None, fields=None, **kwargs): + super(StartedResponse, self).__init__(**kwargs) + self.started = started + self.updated = updated + self.fields = fields + + @schema_property('started') + def started(self): + return self._property_started + + @started.setter + def started(self, value): + if value is None: + self._property_started = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "started", six.integer_types) + self._property_started = value + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class StopRequest(Request): + """ + Request to stop a running task + + :param force: If not true, call fails if the task status is not 'in_progress' + :type force: bool + :param task: Task ID + :type task: str + :param status_reason: Reason for status change + :type status_reason: str + :param status_message: Extra information regarding status change + :type status_message: str + """ + + _service = "tasks" + _action = "stop" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'force': { + 'default': False, + 'description': "If not true, call fails if the task status is not 'in_progress'", + 'type': ['boolean', 'null'], + }, + 'status_message': { + 'description': 'Extra information regarding status change', + 'type': 'string', + }, + 'status_reason': { + 'description': 'Reason for status change', + 'type': 'string', + }, + 'task': {'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, force=False, status_reason=None, status_message=None, **kwargs): + super(StopRequest, self).__init__(**kwargs) + self.force = force + self.task = task + self.status_reason = 
status_reason + self.status_message = status_message + + @schema_property('force') + def force(self): + return self._property_force + + @force.setter + def force(self, value): + if value is None: + self._property_force = None + return + + self.assert_isinstance(value, "force", (bool,)) + self._property_force = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('status_reason') + def status_reason(self): + return self._property_status_reason + + @status_reason.setter + def status_reason(self, value): + if value is None: + self._property_status_reason = None + return + + self.assert_isinstance(value, "status_reason", six.string_types) + self._property_status_reason = value + + @schema_property('status_message') + def status_message(self): + return self._property_status_message + + @status_message.setter + def status_message(self, value): + if value is None: + self._property_status_message = None + return + + self.assert_isinstance(value, "status_message", six.string_types) + self._property_status_message = value + + +class StopResponse(Response): + """ + Response of tasks.stop endpoint. + + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "stop" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, fields=None, **kwargs): + super(StopResponse, self).__init__(**kwargs) + self.updated = updated + self.fields = fields + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class StoppedRequest(Request): + """ + Signal a task has stopped + + :param force: If not true, call fails if the task status is not 'stopped' + :type force: bool + :param task: Task ID + :type task: str + :param status_reason: Reason for status change + :type status_reason: str + :param status_message: Extra information regarding status change + :type status_message: str + """ + + _service = "tasks" + _action = "stopped" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'force': { + 'default': False, + 'description': "If not true, call fails if the task status is not 'stopped'", + 'type': ['boolean', 'null'], + }, + 'status_message': { + 'description': 'Extra information regarding status change', + 'type': 'string', + }, + 'status_reason': { + 'description': 'Reason for status change', + 'type': 'string', + }, + 'task': 
{'description': 'Task ID', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, force=False, status_reason=None, status_message=None, **kwargs): + super(StoppedRequest, self).__init__(**kwargs) + self.force = force + self.task = task + self.status_reason = status_reason + self.status_message = status_message + + @schema_property('force') + def force(self): + return self._property_force + + @force.setter + def force(self, value): + if value is None: + self._property_force = None + return + + self.assert_isinstance(value, "force", (bool,)) + self._property_force = value + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('status_reason') + def status_reason(self): + return self._property_status_reason + + @status_reason.setter + def status_reason(self, value): + if value is None: + self._property_status_reason = None + return + + self.assert_isinstance(value, "status_reason", six.string_types) + self._property_status_reason = value + + @schema_property('status_message') + def status_message(self): + return self._property_status_message + + @status_message.setter + def status_message(self, value): + if value is None: + self._property_status_message = None + return + + self.assert_isinstance(value, "status_message", six.string_types) + self._property_status_message = value + + +class StoppedResponse(Response): + """ + Response of tasks.stopped endpoint. + + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "stopped" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, fields=None, **kwargs): + super(StoppedResponse, self).__init__(**kwargs) + self.updated = updated + self.fields = fields + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class UpdateRequest(Request): + """ + Update task's runtime parameters + + :param task: ID of the task + :type task: str + :param name: Task name Unique within the company. 
+ :type name: str + :param tags: Tags list + :type tags: Sequence[str] + :param comment: Free text comment + :type comment: str + :param project: Project ID of the project to which this task is assigned + :type project: str + :param output__error: Free text error + :type output__error: str + :param created: Task creation time (UTC) + :type created: datetime.datetime + """ + + _service = "tasks" + _action = "update" + _version = "1.5" + _schema = { + 'definitions': {}, + 'properties': { + 'comment': {'description': 'Free text comment ', 'type': 'string'}, + 'created': { + 'description': 'Task creation time (UTC) ', + 'format': 'date-time', + 'type': 'string', + }, + 'name': { + 'description': 'Task name Unique within the company.', + 'type': 'string', + }, + 'output__error': {'description': 'Free text error', 'type': 'string'}, + 'project': { + 'description': 'Project ID of the project to which this task is assigned', + 'type': 'string', + }, + 'tags': {'description': 'Tags list', 'items': {'type': 'string'}, 'type': 'array'}, + 'task': {'description': 'ID of the task', 'type': 'string'}, + }, + 'required': ['task'], + 'type': 'object', + } + def __init__( + self, task, name=None, tags=None, comment=None, project=None, output__error=None, created=None, **kwargs): + super(UpdateRequest, self).__init__(**kwargs) + self.task = task + self.name = name + self.tags = tags + self.comment = comment + self.project = project + self.output__error = output__error + self.created = created + + @schema_property('task') + def task(self): + return self._property_task + + @task.setter + def task(self, value): + if value is None: + self._property_task = None + return + + self.assert_isinstance(value, "task", six.string_types) + self._property_task = value + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('comment') + def comment(self): + return self._property_comment + + @comment.setter + def comment(self, value): + if value is None: + self._property_comment = None + return + + self.assert_isinstance(value, "comment", six.string_types) + self._property_comment = value + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + @schema_property('output__error') + def output__error(self): + return self._property_output__error + + @output__error.setter + def output__error(self, value): + if value is None: + self._property_output__error = None + return + + self.assert_isinstance(value, "output__error", six.string_types) + self._property_output__error = value + + @schema_property('created') + def created(self): + return self._property_created + + @created.setter + def created(self, value): + if value is None: + self._property_created = None + return + + self.assert_isinstance(value, "created", six.string_types + 
(datetime,)) + if not isinstance(value, datetime): + value = parse_datetime(value) + self._property_created = value + + +class UpdateResponse(Response): + """ + Response of tasks.update endpoint. + + :param updated: Number of tasks updated (0 or 1) + :type updated: int + :param fields: Updated fields names and values + :type fields: dict + """ + _service = "tasks" + _action = "update" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'fields': { + 'additionalProperties': True, + 'description': 'Updated fields names and values', + 'type': ['object', 'null'], + }, + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, fields=None, **kwargs): + super(UpdateResponse, self).__init__(**kwargs) + self.updated = updated + self.fields = fields + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + @schema_property('fields') + def fields(self): + return self._property_fields + + @fields.setter + def fields(self, value): + if value is None: + self._property_fields = None + return + + self.assert_isinstance(value, "fields", (dict,)) + self._property_fields = value + + +class UpdateBatchRequest(BatchRequest): + """ + Updates a batch of tasks. + Headers + Content type should be 'application/json-lines'. + + """ + + _service = "tasks" + _action = "update_batch" + _version = "1.5" + _batched_request_cls = UpdateRequest + + +class UpdateBatchResponse(Response): + """ + Response of tasks.update_batch endpoint. + + :param updated: Number of tasks updated (0 or 1) + :type updated: int + """ + _service = "tasks" + _action = "update_batch" + _version = "1.5" + + _schema = { + 'definitions': {}, + 'properties': { + 'updated': { + 'description': 'Number of tasks updated (0 or 1)', + 'enum': [0, 1], + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + } + def __init__( + self, updated=None, **kwargs): + super(UpdateBatchResponse, self).__init__(**kwargs) + self.updated = updated + + @schema_property('updated') + def updated(self): + return self._property_updated + + @updated.setter + def updated(self, value): + if value is None: + self._property_updated = None + return + if isinstance(value, float) and value.is_integer(): + value = int(value) + + self.assert_isinstance(value, "updated", six.integer_types) + self._property_updated = value + + +class ValidateRequest(Request): + """ + Validate task properties (before create) + + :param name: Task name. Unique within the company. + :type name: str + :param tags: Tags list + :type tags: Sequence[str] + :param type: Type of task + :type type: TaskTypeEnum + :param comment: Free text comment + :type comment: str + :param parent: Parent task id Must be a completed task. + :type parent: str + :param project: Project ID of the project to which this task is assigned Must + exist[ab] + :type project: str + :param input: Task input params. (input view must be provided). + :type input: Input + :param output_dest: Output storage id Must be a reference to an existing + storage. 
+ :type output_dest: str + :param execution: Task execution params + :type execution: Execution + :param script: Script info + :type script: Script + """ + + _service = "tasks" + _action = "validate" + _version = "1.9" + _schema = { + 'definitions': { + 'augmentation': { + 'properties': { + 'crop_around_rois': { + 'description': 'Crop image data around all frame ROIs', + 'type': ['boolean', 'null'], + }, + 'sets': { + 'description': 'List of augmentation sets', + 'items': {'$ref': '#/definitions/augmentation_set'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'augmentation_set': { + 'properties': { + 'arguments': { + 'additionalProperties': { + 'additionalProperties': True, + 'type': 'object', + }, + 'description': 'Arguments dictionary per custom augmentation type.', + 'type': ['object', 'null'], + }, + 'cls': { + 'description': 'Augmentation class', + 'type': ['string', 'null'], + }, + 'strength': { + 'description': 'Augmentation strength. Range [0,).', + 'minimum': 0, + 'type': ['number', 'null'], + }, + 'types': { + 'description': 'Augmentation type', + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'execution': { + 'properties': { + 'dataviews': { + 'description': 'Additional dataviews for the task', + 'items': {'additionalProperties': True, 'type': 'object'}, + 'type': ['array', 'null'], + }, + 'framework': { + 'description': 'Framework related to the task. Case insensitive. Mandatory for Training tasks. ', + 'type': ['string', 'null'], + }, + 'model': { + 'description': 'Execution input model ID Not applicable for Register (Import) tasks', + 'type': ['string', 'null'], + }, + 'model_desc': { + 'additionalProperties': True, + 'description': 'Json object representing the Model descriptors', + 'type': ['object', 'null'], + }, + 'model_labels': { + 'additionalProperties': {'type': 'integer'}, + 'description': "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks[z]", + 'type': ['object', 'null'], + }, + 'parameters': { + 'additionalProperties': True, + 'description': 'Json object containing the Task parameters', + 'type': ['object', 'null'], + }, + 'queue': { + 'description': 'Queue ID where task was queued.', + 'type': ['string', 'null'], + }, + 'test_split': { + 'description': 'Percentage of frames to use for testing only', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + }, + 'filter_by_roi_enum': { + 'default': 'label_rules', + 'enum': ['disabled', 'no_rois', 'label_rules'], + 'type': 'string', + }, + 'filter_label_rule': { + 'properties': { + 'conf_range': { + 'description': 'Range of ROI confidence level in the frame (min, max). -1 for not applicable\n Both min and max can be either -1 or positive.\n 2nd number (max) must be either -1 or larger than or equal to the 1st number (min)', + 'items': {'type': 'number'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'count_range': { + 'description': 'Range of times ROI appears in the frame (min, max). 
-1 for not applicable.\n Both integers must be larger than or equal to -1.\n 2nd integer (max) must be either -1 or larger than or equal to the 1st integer (min)', + 'items': {'type': 'integer'}, + 'maxItems': 2, + 'minItems': 1, + 'type': 'array', + }, + 'label': { + 'description': "Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'", + 'type': 'string', + }, + }, + 'required': ['label'], + 'type': 'object', + }, + 'filter_rule': { + 'properties': { + 'dataset': { + 'description': "Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in View are used.", + 'type': 'string', + }, + 'filter_by_roi': { + '$ref': '#/definitions/filter_by_roi_enum', + 'description': 'Type of filter', + }, + 'frame_query': { + 'description': 'Frame filter, in Lucene query syntax', + 'type': 'string', + }, + 'label_rules': { + 'description': "List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules", + 'items': {'$ref': '#/definitions/filter_label_rule'}, + 'type': ['array', 'null'], + }, + 'sources_query': { + 'description': 'Sources filter, in Lucene query syntax. Filters sources in each frame.', + 'type': 'string', + }, + 'version': { + 'description': "Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If set to '*' all version of the datasets in View are used.", + 'type': 'string', + }, + 'weight': { + 'description': 'Rule weight. Default is 1', + 'type': 'number', + }, + }, + 'required': ['filter_by_roi'], + 'type': 'object', + }, + 'filtering': { + 'properties': { + 'filtering_rules': { + 'description': "List of FilterRule ('OR' connection)", + 'items': {'$ref': '#/definitions/filter_rule'}, + 'type': ['array', 'null'], + }, + 'output_rois': { + 'description': "'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be returned multiple times with a different roi each time.\n\nNote: this should be used for Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be returned\n ", + 'oneOf': [ + {'$ref': '#/definitions/output_rois_enum'}, + {'type': 'null'}, + ], + }, + }, + 'type': 'object', + }, + 'input': { + 'properties': { + 'augmentation': { + 'description': 'Augmentation parameters. Only for training and testing tasks.', + 'oneOf': [ + {'$ref': '#/definitions/augmentation'}, + {'type': 'null'}, + ], + }, + 'dataviews': { + 'additionalProperties': {'type': 'string'}, + 'description': 'Key to DataView ID Mapping', + 'type': ['object', 'null'], + }, + 'frames_filter': { + 'description': 'Filtering params', + 'oneOf': [ + {'$ref': '#/definitions/filtering'}, + {'type': 'null'}, + ], + }, + 'iteration': { + 'description': 'Iteration parameters. 
Not applicable for register (import) tasks.', + 'oneOf': [ + {'$ref': '#/definitions/iteration'}, + {'type': 'null'}, + ], + }, + 'mapping': { + 'description': 'Mapping params (see common definitions section)', + 'oneOf': [ + {'$ref': '#/definitions/mapping'}, + {'type': 'null'}, + ], + }, + 'view': { + 'description': 'View params', + 'oneOf': [{'$ref': '#/definitions/view'}, {'type': 'null'}], + }, + }, + 'type': 'object', + }, + 'iteration': { + 'description': 'Sequential Iteration API configuration', + 'properties': { + 'infinite': { + 'description': 'Infinite iteration', + 'type': ['boolean', 'null'], + }, + 'jump': { + 'description': 'Jump entry', + 'oneOf': [{'$ref': '#/definitions/jump'}, {'type': 'null'}], + }, + 'limit': { + 'description': 'Maximum frames per task. If not passed, frames will end when no more matching frames are found, unless infinite is True.', + 'type': ['integer', 'null'], + }, + 'min_sequence': { + 'description': 'Length (in ms) of video clips to return. This is used in random order, and in sequential order only if jumping is provided and only for video frames', + 'type': ['integer', 'null'], + }, + 'order': { + 'description': "\n Input frames order. Values: 'sequential', 'random'\n In Sequential mode frames will be returned according to the order in which the frames were added to the dataset.", + 'type': ['string', 'null'], + }, + 'random_seed': { + 'description': 'Random seed used during iteration', + 'type': 'integer', + }, + }, + 'required': ['random_seed'], + 'type': 'object', + }, + 'jump': { + 'properties': { + 'time': { + 'description': 'Max time in milliseconds between frames', + 'type': ['integer', 'null'], + }, + }, + 'type': 'object', + }, + 'label_source': { + 'properties': { + 'dataset': { + 'description': "Source dataset id. '*' for all datasets in view", + 'type': ['string', 'null'], + }, + 'labels': { + 'description': "List of source labels (AND connection). '*' indicates any label. Labels must exist in at least one of the dataset versions in the task's view", + 'items': {'type': 'string'}, + 'type': ['array', 'null'], + }, + 'version': { + 'description': "Source dataset version id. 
Default is '*' (for all versions in dataset in the view) Version must belong to the selected dataset, and must be in the task's view[i]", + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'mapping': { + 'properties': { + 'rules': { + 'description': 'Rules list', + 'items': {'$ref': '#/definitions/mapping_rule'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'mapping_rule': { + 'properties': { + 'source': { + 'description': 'Source label info', + 'oneOf': [ + {'$ref': '#/definitions/label_source'}, + {'type': 'null'}, + ], + }, + 'target': { + 'description': 'Target label name', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'output_rois_enum': { + 'enum': ['all_in_frame', 'only_filtered', 'frame_per_roi'], + 'type': 'string', + }, + 'script': { + 'properties': { + 'binary': { + 'default': 'python', + 'description': 'Binary to use when running the script', + 'type': ['string', 'null'], + }, + 'branch': { + 'description': 'Repository branch id If not provided and tag not provided, default repository branch is used.', + 'type': ['string', 'null'], + }, + 'entry_point': { + 'description': 'Path to execute within the repository', + 'type': ['string', 'null'], + }, + 'repository': { + 'description': 'Name of the repository where the script is located', + 'type': ['string', 'null'], + }, + 'requirements': { + 'description': 'A JSON object containing requirements strings by key', + 'type': ['object', 'null'], + }, + 'tag': { + 'description': 'Repository tag', + 'type': ['string', 'null'], + }, + 'version_num': { + 'description': 'Version (changeset) number. Optional (default is head version) Unused if tag is provided.', + 'type': ['string', 'null'], + }, + 'working_dir': { + 'description': 'Path to the folder from which to run the script Default - root folder of repository[f]', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + 'task_type_enum': { + 'enum': [ + 'dataset_import', + 'annotation', + 'annotation_manual', + 'training', + 'testing', + ], + 'type': 'string', + }, + 'view': { + 'properties': { + 'entries': { + 'description': 'List of view entries. All tasks must have at least one view.', + 'items': {'$ref': '#/definitions/view_entry'}, + 'type': ['array', 'null'], + }, + }, + 'type': 'object', + }, + 'view_entry': { + 'properties': { + 'dataset': { + 'description': 'Existing Dataset id', + 'type': ['string', 'null'], + }, + 'merge_with': { + 'description': 'Version ID to merge with', + 'type': ['string', 'null'], + }, + 'version': { + 'description': 'Version id of a version belonging to the dataset', + 'type': ['string', 'null'], + }, + }, + 'type': 'object', + }, + }, + 'properties': { + 'comment': {'description': 'Free text comment ', 'type': 'string'}, + 'execution': { + '$ref': '#/definitions/execution', + 'description': 'Task execution params', + }, + 'input': { + '$ref': '#/definitions/input', + 'description': 'Task input params. (input view must be provided).', + }, + 'name': { + 'description': 'Task name. 
Unique within the company.', + 'type': 'string', + }, + 'output_dest': { + 'description': 'Output storage id Must be a reference to an existing storage.', + 'type': 'string', + }, + 'parent': { + 'description': 'Parent task id Must be a completed task.', + 'type': 'string', + }, + 'project': { + 'description': 'Project ID of the project to which this task is assigned Must exist[ab]', + 'type': 'string', + }, + 'script': { + '$ref': '#/definitions/script', + 'description': 'Script info', + }, + 'tags': {'description': 'Tags list', 'items': {'type': 'string'}, 'type': 'array'}, + 'type': { + '$ref': '#/definitions/task_type_enum', + 'description': 'Type of task', + }, + }, + 'required': ['name', 'type'], + 'type': 'object', + } + def __init__( + self, name, type, tags=None, comment=None, parent=None, project=None, input=None, output_dest=None, execution=None, script=None, **kwargs): + super(ValidateRequest, self).__init__(**kwargs) + self.name = name + self.tags = tags + self.type = type + self.comment = comment + self.parent = parent + self.project = project + self.input = input + self.output_dest = output_dest + self.execution = execution + self.script = script + + @schema_property('name') + def name(self): + return self._property_name + + @name.setter + def name(self, value): + if value is None: + self._property_name = None + return + + self.assert_isinstance(value, "name", six.string_types) + self._property_name = value + + @schema_property('tags') + def tags(self): + return self._property_tags + + @tags.setter + def tags(self, value): + if value is None: + self._property_tags = None + return + + self.assert_isinstance(value, "tags", (list, tuple)) + + self.assert_isinstance(value, "tags", six.string_types, is_array=True) + self._property_tags = value + + @schema_property('type') + def type(self): + return self._property_type + + @type.setter + def type(self, value): + if value is None: + self._property_type = None + return + if isinstance(value, six.string_types): + try: + value = TaskTypeEnum(value) + except ValueError: + pass + else: + self.assert_isinstance(value, "type", enum.Enum) + self._property_type = value + + @schema_property('comment') + def comment(self): + return self._property_comment + + @comment.setter + def comment(self, value): + if value is None: + self._property_comment = None + return + + self.assert_isinstance(value, "comment", six.string_types) + self._property_comment = value + + @schema_property('parent') + def parent(self): + return self._property_parent + + @parent.setter + def parent(self, value): + if value is None: + self._property_parent = None + return + + self.assert_isinstance(value, "parent", six.string_types) + self._property_parent = value + + @schema_property('project') + def project(self): + return self._property_project + + @project.setter + def project(self, value): + if value is None: + self._property_project = None + return + + self.assert_isinstance(value, "project", six.string_types) + self._property_project = value + + @schema_property('input') + def input(self): + return self._property_input + + @input.setter + def input(self, value): + if value is None: + self._property_input = None + return + if isinstance(value, dict): + value = Input.from_dict(value) + else: + self.assert_isinstance(value, "input", Input) + self._property_input = value + + @schema_property('output_dest') + def output_dest(self): + return self._property_output_dest + + @output_dest.setter + def output_dest(self, value): + if value is None: + self._property_output_dest = 
None + return + + self.assert_isinstance(value, "output_dest", six.string_types) + self._property_output_dest = value + + @schema_property('execution') + def execution(self): + return self._property_execution + + @execution.setter + def execution(self, value): + if value is None: + self._property_execution = None + return + if isinstance(value, dict): + value = Execution.from_dict(value) + else: + self.assert_isinstance(value, "execution", Execution) + self._property_execution = value + + @schema_property('script') + def script(self): + return self._property_script + + @script.setter + def script(self, value): + if value is None: + self._property_script = None + return + if isinstance(value, dict): + value = Script.from_dict(value) + else: + self.assert_isinstance(value, "script", Script) + self._property_script = value + + +class ValidateResponse(Response): + """ + Response of tasks.validate endpoint. + + """ + _service = "tasks" + _action = "validate" + _version = "1.9" + + _schema = {'additionalProperties': False, 'definitions': {}, 'type': 'object'} + + +response_mapping = { + GetByIdRequest: GetByIdResponse, + GetAllRequest: GetAllResponse, + CreateRequest: CreateResponse, + ValidateRequest: ValidateResponse, + UpdateRequest: UpdateResponse, + UpdateBatchRequest: UpdateBatchResponse, + EditRequest: EditResponse, + ResetRequest: ResetResponse, + DeleteRequest: DeleteResponse, + StartedRequest: StartedResponse, + StopRequest: StopResponse, + StoppedRequest: StoppedResponse, + FailedRequest: FailedResponse, + CloseRequest: CloseResponse, + PublishRequest: PublishResponse, + EnqueueRequest: EnqueueResponse, + DequeueRequest: DequeueResponse, + SetRequirementsRequest: SetRequirementsResponse, +} diff --git a/trains/backend_api/session/__init__.py b/trains/backend_api/session/__init__.py new file mode 100644 index 00000000..e8ece3a8 --- /dev/null +++ b/trains/backend_api/session/__init__.py @@ -0,0 +1,7 @@ +from .session import Session +from .datamodel import DataModel, NonStrictDataModel, schema_property, StringEnum +from .request import Request, BatchRequest, CompoundRequest +from .response import Response +from .token_manager import TokenManager +from .errors import TimeoutExpiredError, ResultNotReadyError +from .callresult import CallResult diff --git a/trains/backend_api/session/apimodel.py b/trains/backend_api/session/apimodel.py new file mode 100644 index 00000000..2d67be9f --- /dev/null +++ b/trains/backend_api/session/apimodel.py @@ -0,0 +1,8 @@ +from .datamodel import DataModel + + +class ApiModel(DataModel): + """ API-related data model """ + _service = None + _action = None + _version = None diff --git a/trains/backend_api/session/callresult.py b/trains/backend_api/session/callresult.py new file mode 100644 index 00000000..40bd5d1d --- /dev/null +++ b/trains/backend_api/session/callresult.py @@ -0,0 +1,131 @@ +import sys +import time + +from ...backend_api.utils import get_response_cls + +from .response import ResponseMeta, Response +from .errors import ResultNotReadyError, TimeoutExpiredError + + +class CallResult(object): + @property + def meta(self): + return self.__meta + + @property + def response(self): + return self.__response + + @property + def response_data(self): + return self.__response_data + + @property + def async_accepted(self): + return self.meta.result_code == 202 + + @property + def request_cls(self): + return self.__request_cls + + def __init__(self, meta, response=None, response_data=None, request_cls=None, session=None): + assert isinstance(meta, 
ResponseMeta) + if response and not isinstance(response, Response): + raise ValueError('response should be an instance of %s' % Response.__name__) + elif response_data and not isinstance(response_data, dict): + raise TypeError('data should be an instance of {}'.format(dict.__name__)) + + self.__meta = meta + self.__response = response + self.__request_cls = request_cls + self.__session = session + self.__async_result = None + + if response_data is not None: + self.__response_data = response_data + elif response is not None: + try: + self.__response_data = response.to_dict() + except AttributeError: + raise TypeError('response should be an instance of {}'.format(Response.__name__)) + else: + self.__response_data = None + + @classmethod + def from_result(cls, res, request_cls=None, logger=None, service=None, action=None, session=None): + """ From requests result """ + response_cls = get_response_cls(request_cls) + try: + data = res.json() + except ValueError: + service = service or (request_cls._service if request_cls else 'unknown') + action = action or (request_cls._action if request_cls else 'unknown') + return cls(request_cls=request_cls, meta=ResponseMeta.from_raw_data( + status_code=res.status_code, text=res.text, endpoint='%(service)s.%(action)s' % locals())) + if 'meta' not in data: + raise ValueError('Missing meta section in response payload') + try: + meta = ResponseMeta(**data['meta']) + # TODO: validate meta? + # meta.validate() + except Exception as ex: + raise ValueError('Failed parsing meta section in response payload (data=%s, error=%s)' % (data, ex)) + + response = None + response_data = None + try: + response_data = data.get('data', {}) + if response_cls: + response = response_cls(**response_data) + # TODO: validate response? + # response.validate() + except Exception as e: + if logger: + logger.warn('Failed parsing response: %s' % str(e)) + return cls(meta=meta, response=response, response_data=response_data, request_cls=request_cls, session=session) + + def ok(self): + return self.meta.result_code == 200 + + def ready(self): + if not self.async_accepted: + return True + session = self.__session + res = session.send_request(service='async', action='result', json=dict(id=self.meta.id), async_enable=False) + if res.status_code != session._async_status_code: + self.__async_result = CallResult.from_result(res=res, request_cls=self.request_cls, logger=session._logger) + return True + + def result(self): + if not self.async_accepted: + return self + if self.__async_result is None: + raise ResultNotReadyError(self._format_msg('Timeout expired'), call_id=self.meta.id) + return self.__async_result + + def wait(self, timeout=None, poll_interval=5, verbose=False): + if not self.async_accepted: + return self + session = self.__session + poll_interval = max(1, poll_interval) + remaining = max(0, timeout) if timeout else sys.maxsize + while remaining > 0: + if not self.ready(): + # Still pending, log and continue + if verbose and session._logger: + progress = ('waiting forever' + if timeout is False + else '%.1f/%.1f seconds remaining' % (remaining, float(timeout or 0))) + session._logger.info('Waiting for asynchronous call %s (%s)' + % (self.request_cls.__name__, progress)) + time.sleep(poll_interval) + remaining -= poll_interval + continue + # We've got something (good or bad, we don't know), create a call result and return + return self.result() + + # Timeout expired, return the asynchronous call's result (we've got nothing better to report) + raise 
TimeoutExpiredError(self._format_msg('Timeout expired'), call_id=self.meta.id) + + def _format_msg(self, msg): + return msg + ' for call %s (%s)' % (self.request_cls.__name__, self.meta.id) diff --git a/trains/backend_api/session/datamodel.py b/trains/backend_api/session/datamodel.py new file mode 100644 index 00000000..f859ea55 --- /dev/null +++ b/trains/backend_api/session/datamodel.py @@ -0,0 +1,145 @@ +import keyword + +import enum +import json +import warnings +from datetime import datetime + +import jsonschema +from enum import Enum + +import six + + +def format_date(obj): + if isinstance(obj, datetime): + return str(obj) + + +class SchemaProperty(property): + def __init__(self, name=None, *args, **kwargs): + super(SchemaProperty, self).__init__(*args, **kwargs) + self.name = name + + def setter(self, fset): + return type(self)(self.name, self.fget, fset, self.fdel, self.__doc__) + + +def schema_property(name): + def init(*args, **kwargs): + return SchemaProperty(name, *args, **kwargs) + return init + + +class DataModel(object): + """ Data Model""" + _schema = None + _data_props_list = None + + @classmethod + def _get_data_props(cls): + props = cls._data_props_list + if props is None: + props = {} + for c in cls.__mro__: + props.update({k: getattr(v, 'name', k) for k, v in vars(c).items() + if isinstance(v, property)}) + cls._data_props_list = props + return props.copy() + + @classmethod + def _to_base_type(cls, value): + if isinstance(value, DataModel): + return value.to_dict() + elif isinstance(value, enum.Enum): + return value.value + elif isinstance(value, list): + return [cls._to_base_type(model) for model in value] + return value + + def to_dict(self, only=None, except_=None): + prop_values = {v: getattr(self, k) for k, v in self._get_data_props().items()} + return { + k: self._to_base_type(v) + for k, v in prop_values.items() + if v is not None and (not only or k in only) and (not except_ or k not in except_) + } + + def validate(self, schema=None): + jsonschema.validate( + self.to_dict(), + schema or self._schema, + types=dict(array=(list, tuple), integer=six.integer_types), + ) + + def __repr__(self): + return '<{}.{}: {}>'.format( + self.__module__.split('.')[-1], + type(self).__name__, + json.dumps( + self.to_dict(), + indent=4, + default=format_date, + ) + ) + + @staticmethod + def assert_isinstance(value, field_name, expected, is_array=False): + if not is_array: + if not isinstance(value, expected): + raise TypeError("Expected %s of type %s, got %s" % (field_name, expected, type(value).__name__)) + return + + if not all(isinstance(x, expected) for x in value): + raise TypeError( + "Expected %s of type list[%s], got %s" % ( + field_name, + expected, + ", ".join(set(type(x).__name__ for x in value)), + ) + ) + + @staticmethod + def normalize_key(prop_key): + if keyword.iskeyword(prop_key): + prop_key += '_' + return prop_key.replace('.', '__') + + @classmethod + def from_dict(cls, dct, strict=False): + """ + Create an instance from a dictionary while ignoring unnecessary keys + """ + allowed_keys = cls._get_data_props().values() + invalid_keys = set(dct).difference(allowed_keys) + if strict and invalid_keys: + raise ValueError("Invalid keys %s" % tuple(invalid_keys)) + return cls(**{cls.normalize_key(key): value for key, value in dct.items() if key not in invalid_keys}) + + +class UnusedKwargsWarning(UserWarning): + pass + + +class NonStrictDataModelMixin(object): + """ + NonStrictDataModelMixin + + :summary: supplies an __init__ method that warns about unused keywords + 
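+
+    For example (illustrative subclass), an unknown keyword argument does not raise an
+    error, it only emits an UnusedKwargsWarning:
+
+        class Foo(NonStrictDataModel):
+            pass
+
+        Foo(unknown_field=1)  # warns: "Foo: unused keyword argument(s) ['unknown_field']"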
""" + def __init__(self, **kwargs): + unexpected = [key for key in kwargs if not key.startswith('_')] + if unexpected: + message = '{}: unused keyword argument(s) {}' \ + .format(type(self).__name__, unexpected) + warnings.warn(message, UnusedKwargsWarning) + + +class NonStrictDataModel(DataModel, NonStrictDataModelMixin): + pass + + +class StringEnum(Enum): + + def __str__(self): + return self.value diff --git a/trains/backend_api/session/defs.py b/trains/backend_api/session/defs.py new file mode 100644 index 00000000..2b0f1be3 --- /dev/null +++ b/trains/backend_api/session/defs.py @@ -0,0 +1,7 @@ +from ...backend_config import EnvEntry + + +ENV_HOST = EnvEntry("TRAINS_API_HOST", "ALG_API_HOST") +ENV_ACCESS_KEY = EnvEntry("TRAINS_API_ACCESS_KEY", "ALG_API_ACCESS_KEY") +ENV_SECRET_KEY = EnvEntry("TRAINS_API_SECRET_KEY", "ALG_API_SECRET_KEY") +ENV_VERBOSE = EnvEntry("TRAINS_API_VERBOSE", "ALG_API_VERBOSE", type=bool, default=False) diff --git a/trains/backend_api/session/errors.py b/trains/backend_api/session/errors.py new file mode 100644 index 00000000..ab5c335c --- /dev/null +++ b/trains/backend_api/session/errors.py @@ -0,0 +1,17 @@ +class SessionError(Exception): + pass + + +class AsyncError(SessionError): + def __init__(self, msg, *args, **kwargs): + super(AsyncError, self).__init__(msg, *args) + for k, v in kwargs.items(): + setattr(self, k, v) + + +class TimeoutExpiredError(SessionError): + pass + + +class ResultNotReadyError(SessionError): + pass diff --git a/trains/backend_api/session/request.py b/trains/backend_api/session/request.py new file mode 100644 index 00000000..7640b5a9 --- /dev/null +++ b/trains/backend_api/session/request.py @@ -0,0 +1,76 @@ +import abc + +import jsonschema +import six + +from .apimodel import ApiModel +from .datamodel import DataModel + + +class Request(ApiModel): + _method = 'get' + + def __init__(self, **kwargs): + if kwargs: + raise ValueError('Unsupported keyword arguments: %s' % ', '.join(kwargs.keys())) + + +@six.add_metaclass(abc.ABCMeta) +class BatchRequest(Request): + + _batched_request_cls = abc.abstractproperty() + + _schema_errors = (jsonschema.SchemaError, jsonschema.ValidationError, jsonschema.FormatError, + jsonschema.RefResolutionError) + + def __init__(self, requests, validate_requests=False, allow_raw_requests=True, **kwargs): + super(BatchRequest, self).__init__(**kwargs) + self._validate_requests = validate_requests + self._allow_raw_requests = allow_raw_requests + self._property_requests = None + self.requests = requests + + @property + def requests(self): + return self._property_requests + + @requests.setter + def requests(self, value): + assert issubclass(self._batched_request_cls, Request) + assert isinstance(value, (list, tuple)) + if not self._allow_raw_requests: + if any(isinstance(x, dict) for x in value): + value = [self._batched_request_cls(**x) if isinstance(x, dict) else x for x in value] + assert all(isinstance(x, self._batched_request_cls) for x in value) + + self._property_requests = value + + def validate(self): + if not self._validate_requests or self._allow_raw_requests: + return + for i, req in enumerate(self.requests): + try: + req.validate() + except (jsonschema.SchemaError, jsonschema.ValidationError, + jsonschema.FormatError, jsonschema.RefResolutionError) as e: + raise Exception('Validation error in batch item #%d: %s' % (i, str(e))) + + def get_json(self): + return [r if isinstance(r, dict) else r.to_dict() for r in self.requests] + + +class CompoundRequest(Request): + _item_prop_name = 'item' + + def 
_get_item(self): + item = getattr(self, self._item_prop_name, None) + if item is None: + raise ValueError('Item property is empty or missing') + assert isinstance(item, DataModel) + return item + + def to_dict(self): + return self._get_item().to_dict() + + def validate(self): + return self._get_item().validate(self._schema) diff --git a/trains/backend_api/session/response.py b/trains/backend_api/session/response.py new file mode 100644 index 00000000..4e6d159c --- /dev/null +++ b/trains/backend_api/session/response.py @@ -0,0 +1,49 @@ +import requests + +import jsonmodels.models +import jsonmodels.fields +import jsonmodels.errors + +from .apimodel import ApiModel +from .datamodel import NonStrictDataModelMixin + + +class Response(ApiModel, NonStrictDataModelMixin): + pass + + +class _ResponseEndpoint(jsonmodels.models.Base): + name = jsonmodels.fields.StringField() + requested_version = jsonmodels.fields.FloatField() + actual_version = jsonmodels.fields.FloatField() + + +class ResponseMeta(jsonmodels.models.Base): + @property + def is_valid(self): + return self._is_valid + + @classmethod + def from_raw_data(cls, status_code, text, endpoint=None): + return cls(is_valid=False, result_code=status_code, result_subcode=0, result_msg=text, + endpoint=_ResponseEndpoint(name=(endpoint or 'unknown'))) + + def __init__(self, is_valid=True, **kwargs): + super(ResponseMeta, self).__init__(**kwargs) + self._is_valid = is_valid + + id = jsonmodels.fields.StringField(required=True) + trx = jsonmodels.fields.StringField(required=True) + endpoint = jsonmodels.fields.EmbeddedField([_ResponseEndpoint], required=True) + result_code = jsonmodels.fields.IntField(required=True) + result_subcode = jsonmodels.fields.IntField() + result_msg = jsonmodels.fields.StringField(required=True) + error_stack = jsonmodels.fields.StringField() + + def __str__(self): + if self.result_code == requests.codes.ok: + return "<%d: %s/v%.1f>" % (self.result_code, self.endpoint.name, self.endpoint.actual_version) + elif self._is_valid: + return "<%d/%d: %s/v%.1f (%s)>" % (self.result_code, self.result_subcode, self.endpoint.name, + self.endpoint.actual_version, self.result_msg) + return "<%d/%d: %s (%s)>" % (self.result_code, self.result_subcode, self.endpoint.name, self.result_msg) diff --git a/trains/backend_api/session/session.py b/trains/backend_api/session/session.py new file mode 100644 index 00000000..901e429e --- /dev/null +++ b/trains/backend_api/session/session.py @@ -0,0 +1,425 @@ +import json as json_lib +import sys +import types +from socket import gethostname + +import requests +import six +from pyhocon import ConfigTree +from requests.auth import HTTPBasicAuth + +from .callresult import CallResult +from .defs import ENV_VERBOSE, ENV_HOST, ENV_ACCESS_KEY, ENV_SECRET_KEY +from .request import Request, BatchRequest +from .token_manager import TokenManager +from ..config import load +from ..utils import get_http_session_with_retry +from ..version import __version__ + + +class LoginError(Exception): + pass + + +class Session(TokenManager): + """ TRAINS API Session class. 
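+
+    A minimal usage sketch (import paths and the request class used are indicative;
+    assumes a reachable api server and credentials available in the configuration
+    file or environment variables):
+
+        from trains.backend_api import Session
+        from trains.backend_api.services import tasks
+
+        session = Session()
+        result = session.send(tasks.GetAllRequest())
+        if result.ok():
+            print(result.response_data)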
""" + + _AUTHORIZATION_HEADER = "Authorization" + _WORKER_HEADER = "X-Trains-Worker" + _ASYNC_HEADER = "X-Trains-Async" + _CLIENT_HEADER = "X-Trains-Client" + + _async_status_code = 202 + _session_requests = 0 + _session_initial_timeout = (1.0, 10) + _session_timeout = (5.0, None) + + # TODO: add requests.codes.gateway_timeout once we support async commits + _retry_codes = [ + requests.codes.bad_gateway, + requests.codes.service_unavailable, + requests.codes.bandwidth_limit_exceeded, + requests.codes.too_many_requests, + ] + + @property + def access_key(self): + return self.__access_key + + @property + def secret_key(self): + return self.__secret_key + + @property + def host(self): + return self.__host + + @property + def worker(self): + return self.__worker + + def __init__( + self, + worker=None, + api_key=None, + secret_key=None, + host=None, + logger=None, + verbose=None, + initialize_logging=True, + client=None, + config=None, + **kwargs + ): + + if config is not None: + self.config = config + else: + self.config = load() + if initialize_logging: + self.config.initialize_logging() + + token_expiration_threshold_sec = self.config.get( + "auth.token_expiration_threshold_sec", 60 + ) + + super(Session, self).__init__( + token_expiration_threshold_sec=token_expiration_threshold_sec, **kwargs + ) + + self._verbose = verbose if verbose is not None else ENV_VERBOSE.get() + self._logger = logger + + self.__access_key = api_key or ENV_ACCESS_KEY.get( + default=(self.config.get("api.credentials.access_key", None) or + "EGRTCO8JMSIGI6S39GTP43NFWXDQOW") + ) + if not self.access_key: + raise ValueError( + "Missing access_key. Please set in configuration file or pass in session init." + ) + + self.__secret_key = secret_key or ENV_SECRET_KEY.get( + default=(self.config.get("api.credentials.secret_key", None) or + "x!XTov_G-#vspE*Y(h$Anm&DIc5Ou-F)jsl$PdOyj5wG1&E!Z8") + ) + if not self.secret_key: + raise ValueError( + "Missing secret_key. Please set in configuration file or pass in session init." + ) + + host = host or ENV_HOST.get(default=self.config.get("api.host")) + if not host: + raise ValueError("host is required in init or config") + + self.__host = host.strip("/") + http_retries_config = self.config.get( + "api.http.retries", ConfigTree() + ).as_plain_ordered_dict() + http_retries_config["status_forcelist"] = self._retry_codes + self.__http_session = get_http_session_with_retry(**http_retries_config) + + self.__worker = worker or gethostname() + + self.__max_req_size = self.config.get("api.http.max_req_size") + if not self.__max_req_size: + raise ValueError("missing max request size") + + self.client = client or "api-{}".format(__version__) + + self.refresh_token() + + def _send_request( + self, + service, + action, + version=None, + method="get", + headers=None, + auth=None, + data=None, + json=None, + refresh_token_if_unauthorized=True, + ): + """ Internal implementation for making a raw API request. + - Constructs the api endpoint name + - Injects the worker id into the headers + - Allows custom authorization using a requests auth object + - Intercepts `Unauthorized` responses and automatically attempts to refresh the session token once in this + case (only once). This is done since permissions are embedded in the token, and addresses a case where + server-side permissions have changed but are not reflected in the current token. Refreshing the token will + generate a token with the updated permissions. 
+ """ + host = self.host + headers = headers.copy() if headers else {} + headers[self._WORKER_HEADER] = self.worker + headers[self._CLIENT_HEADER] = self.client + + token_refreshed_on_error = False + url = ( + "{host}/v{version}/{service}.{action}" + if version + else "{host}/{service}.{action}" + ).format(**locals()) + while True: + res = self.__http_session.request( + method, url, headers=headers, auth=auth, data=data, json=json, + timeout=self._session_initial_timeout if self._session_requests < 1 else self._session_timeout, + ) + if ( + refresh_token_if_unauthorized + and res.status_code == requests.codes.unauthorized + and not token_refreshed_on_error + ): + # it seems we're unauthorized, so we'll try to refresh our token once in case permissions changed since + # the last time we got the token, and try again + self.refresh_token() + token_refreshed_on_error = True + # try again + continue + if ( + res.status_code == requests.codes.service_unavailable + and self.config.get("api.http.wait_on_maintenance_forever", True) + ): + self._logger.warn( + "Service unavailable: {} is undergoing maintenance, retrying...".format( + host + ) + ) + continue + break + self._session_requests += 1 + return res + + def send_request( + self, + service, + action, + version=None, + method="get", + headers=None, + data=None, + json=None, + async_enable=False, + ): + """ + Send a raw API request. + :param service: service name + :param action: action name + :param version: version number (default is the preconfigured api version) + :param method: method type (default is 'get') + :param headers: request headers (authorization and content type headers will be automatically added) + :param json: json to send in the request body (jsonable object or builtin types construct. if used, + content type will be application/json) + :param data: Dictionary, bytes, or file-like object to send in the request body + :param async_enable: whether request is asynchronous + :return: requests Response instance + """ + headers = headers.copy() if headers else {} + headers[self._AUTHORIZATION_HEADER] = "Bearer {}".format(self.token) + if async_enable: + headers[self._ASYNC_HEADER] = "1" + return self._send_request( + service=service, + action=action, + version=version, + method=method, + headers=headers, + data=data, + json=json, + ) + + def send_request_batch( + self, + service, + action, + version=None, + headers=None, + data=None, + json=None, + method="get", + ): + """ + Send a raw batch API request. Batch requests always use application/json-lines content type. + :param service: service name + :param action: action name + :param version: version number (default is the preconfigured api version) + :param headers: request headers (authorization and content type headers will be automatically added) + :param json: iterable of json items (batched items, jsonable objects or builtin types constructs). These will + be sent as a multi-line payload in the request body. + :param data: iterable of bytes objects (batched items). These will be sent as a multi-line payload in the + request body. + :param method: HTTP method + :return: requests Response instance + """ + if not all( + isinstance(x, (list, tuple, type(None), types.GeneratorType)) + for x in (data, json) + ): + raise ValueError("Expecting list, tuple or generator in 'data' or 'json'") + + if not data and not json: + raise ValueError( + "Missing data (data or json), batch requests are meaningless without it." 
+ ) + + headers = headers.copy() if headers else {} + headers["Content-Type"] = "application/json-lines" + + if data: + req_data = "\n".join(data) + else: + req_data = "\n".join(json_lib.dumps(x) for x in json) + + cur = 0 + results = [] + while True: + size = self.__max_req_size + slice = req_data[cur : cur + size] + if not slice: + break + if len(slice) < size: + # this is the remainder, no need to search for newline + pass + elif slice[-1] != "\n": + # search for the last newline in order to send a coherent request + size = slice.rfind("\n") + 1 + # readjust the slice + slice = req_data[cur : cur + size] + res = self.send_request( + method=method, + service=service, + action=action, + data=slice, + headers=headers, + version=version, + ) + results.append(res) + if res.status_code != requests.codes.ok: + break + cur += size + return results + + def validate_request(self, req_obj): + """ Validate an API request against the current version and the request's schema """ + + try: + # make sure we're using a compatible version for this request + # validate the request (checks required fields and specific field version restrictions) + validate = req_obj.validate + except AttributeError: + raise TypeError( + '"req_obj" parameter must be an backend_api.session.Request object' + ) + + validate() + + def send_async(self, req_obj): + """ + Asynchronously sends an API request using a request object. + :param req_obj: The request object + :type req_obj: Request + :return: CallResult object containing the raw response, response metadata and parsed response object. + """ + return self.send(req_obj=req_obj, async_enable=True) + + def send(self, req_obj, async_enable=False, headers=None): + """ + Sends an API request using a request object. + :param req_obj: The request object + :type req_obj: Request + :param async_enable: Request this method be executed in an asynchronous manner + :param headers: Additional headers to send with request + :return: CallResult object containing the raw response, response metadata and parsed response object. + """ + self.validate_request(req_obj) + + if isinstance(req_obj, BatchRequest): + # TODO: support async for batch requests as well + if async_enable: + raise NotImplementedError( + "Async behavior is currently not implemented for batch requests" + ) + + json_data = req_obj.get_json() + res = self.send_request_batch( + service=req_obj._service, + action=req_obj._action, + version=req_obj._version, + json=json_data, + method=req_obj._method, + headers=headers, + ) + # TODO: handle multiple results in this case + try: + res = next(r for r in res if r.status_code != 200) + except StopIteration: + # all are 200 + res = res[0] + else: + res = self.send_request( + service=req_obj._service, + action=req_obj._action, + version=req_obj._version, + json=req_obj.to_dict(), + method=req_obj._method, + async_enable=async_enable, + headers=headers, + ) + + call_result = CallResult.from_result( + res=res, + request_cls=req_obj.__class__, + logger=self._logger, + service=req_obj._service, + action=req_obj._action, + session=self, + ) + + return call_result + + def _do_refresh_token(self, old_token, exp=None): + """ TokenManager abstract method implementation. + Here we ignore the old token and simply obtain a new token. 
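+        The new token is requested from the auth.login endpoint using HTTP basic
+        authentication (access key / secret key). A successful response is expected
+        to carry the token under data.token, roughly (payload shape is simplified):
+
+            {"meta": {...}, "data": {"token": "<jwt>"}}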
+ """ + verbose = self._verbose and self._logger + if verbose: + self._logger.info( + "Refreshing token from {} (access_key={}, exp={})".format( + self.host, self.access_key, exp + ) + ) + + auth = HTTPBasicAuth(self.access_key, self.secret_key) + try: + data = {"expiration_sec": exp} if exp else {} + res = self._send_request( + service="auth", + action="login", + auth=auth, + json=data, + refresh_token_if_unauthorized=False, + ) + try: + resp = res.json() + except ValueError: + resp = {} + if res.status_code != 200: + msg = resp.get("meta", {}).get("result_msg", res.reason) + raise LoginError( + "Failed getting token (error {} from {}): {}".format( + res.status_code, self.host, msg + ) + ) + if verbose: + self._logger.info("Received new token") + return resp["data"]["token"] + except LoginError: + six.reraise(*sys.exc_info()) + except Exception as ex: + raise LoginError(str(ex)) + + def __str__(self): + return "{self.__class__.__name__}[{self.host}, {self.access_key}/{secret_key}]".format( + self=self, secret_key=self.secret_key[:5] + "*" * (len(self.secret_key) - 5) + ) diff --git a/trains/backend_api/session/token_manager.py b/trains/backend_api/session/token_manager.py new file mode 100644 index 00000000..7d42d82f --- /dev/null +++ b/trains/backend_api/session/token_manager.py @@ -0,0 +1,95 @@ +import sys +from abc import ABCMeta, abstractmethod +from time import time + +import jwt +import six + + +@six.add_metaclass(ABCMeta) +class TokenManager(object): + + @property + def token_expiration_threshold_sec(self): + return self.__token_expiration_threshold_sec + + @token_expiration_threshold_sec.setter + def token_expiration_threshold_sec(self, value): + self.__token_expiration_threshold_sec = value + + @property + def req_token_expiration_sec(self): + """ Token expiration sec requested when refreshing token """ + return self.__req_token_expiration_sec + + @req_token_expiration_sec.setter + def req_token_expiration_sec(self, value): + assert isinstance(value, (type(None), int)) + self.__req_token_expiration_sec = value + + @property + def token_expiration_sec(self): + return self.__token_expiration_sec + + @property + def token(self): + return self._get_token() + + @property + def raw_token(self): + return self.__token + + def __init__( + self, + token=None, + req_token_expiration_sec=None, + token_history=None, + token_expiration_threshold_sec=60, + **kwargs + ): + super(TokenManager, self).__init__() + assert isinstance(token_history, (type(None), dict)) + self.token_expiration_threshold_sec = token_expiration_threshold_sec + self.req_token_expiration_sec = req_token_expiration_sec + self._set_token(token) + + def _calc_token_valid_period_sec(self, token, exp=None, at_least_sec=None): + if token: + try: + exp = exp or self._get_token_exp(token) + if at_least_sec: + at_least_sec = max(at_least_sec, self.token_expiration_threshold_sec) + else: + at_least_sec = self.token_expiration_threshold_sec + return max(0, (exp - time() - at_least_sec)) + except Exception: + pass + return 0 + + @classmethod + def _get_token_exp(cls, token): + """ Get token expiration time. 
If not present, assume forever """ + return jwt.decode(token, verify=False).get('exp', sys.maxsize) + + def _set_token(self, token): + if token: + self.__token = token + self.__token_expiration_sec = self._get_token_exp(token) + else: + self.__token = None + self.__token_expiration_sec = 0 + + def get_token_valid_period_sec(self): + return self._calc_token_valid_period_sec(self.__token, self.token_expiration_sec) + + def _get_token(self): + if self.get_token_valid_period_sec() <= 0: + self.refresh_token() + return self.__token + + @abstractmethod + def _do_refresh_token(self, old_token, exp=None): + pass + + def refresh_token(self): + self._set_token(self._do_refresh_token(self.__token, exp=self.req_token_expiration_sec)) diff --git a/trains/backend_api/utils.py b/trains/backend_api/utils.py new file mode 100644 index 00000000..e2756798 --- /dev/null +++ b/trains/backend_api/utils.py @@ -0,0 +1,86 @@ +import ssl +import sys + +import requests +from requests.adapters import HTTPAdapter +## from requests.packages.urllib3.util.retry import Retry +from urllib3.util import Retry +from urllib3 import PoolManager +import six + +if six.PY3: + from functools import lru_cache +elif six.PY2: + # python 2 support + from backports.functools_lru_cache import lru_cache + + +@lru_cache() +def get_config(): + from ..backend_config import Config + config = Config(verbose=False) + config.reload() + return config + + +class TLSv1HTTPAdapter(HTTPAdapter): + def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs): + self.poolmanager = PoolManager(num_pools=connections, + maxsize=maxsize, + block=block, + ssl_version=ssl.PROTOCOL_TLSv1_2) + + +def get_http_session_with_retry( + total=0, + connect=None, + read=None, + redirect=None, + status=None, + status_forcelist=None, + backoff_factor=0, + backoff_max=None, + pool_connections=None, + pool_maxsize=None): + if not all(isinstance(x, (int, type(None))) for x in (total, connect, read, redirect, status)): + raise ValueError('Bad configuration. All retry count values must be null or int') + + if status_forcelist and not all(isinstance(x, int) for x in status_forcelist): + raise ValueError('Bad configuration. 
Retry status_forcelist must be null or list of ints') + + pool_maxsize = ( + pool_maxsize + if pool_maxsize is not None + else get_config().get('api.http.pool_maxsize', 512) + ) + + pool_connections = ( + pool_connections + if pool_connections is not None + else get_config().get('api.http.pool_connections', 512) + ) + + session = requests.Session() + + if backoff_max is not None: + Retry.BACKOFF_MAX = backoff_max + + retry = Retry( + total=total, connect=connect, read=read, redirect=redirect, status=status, + status_forcelist=status_forcelist, backoff_factor=backoff_factor) + + adapter = TLSv1HTTPAdapter(max_retries=retry, pool_connections=pool_connections, pool_maxsize=pool_maxsize) + session.mount('http://', adapter) + session.mount('https://', adapter) + return session + + +def get_response_cls(request_cls): + """ Extract a request's response class using the mapping found in the module defining the request's service """ + for req_cls in request_cls.mro(): + module = sys.modules[req_cls.__module__] + if hasattr(module, 'action_mapping'): + return module.action_mapping[(request_cls._action, request_cls._version)][1] + elif hasattr(module, 'response_mapping'): + return module.response_mapping[req_cls] + raise TypeError('no response class!') diff --git a/trains/backend_api/version.py b/trains/backend_api/version.py new file mode 100644 index 00000000..afced147 --- /dev/null +++ b/trains/backend_api/version.py @@ -0,0 +1 @@ +__version__ = '2.0.0' diff --git a/trains/backend_config/__init__.py b/trains/backend_config/__init__.py new file mode 100644 index 00000000..cdd0ce01 --- /dev/null +++ b/trains/backend_config/__init__.py @@ -0,0 +1,4 @@ +from .defs import Environment +from .config import Config, ConfigEntry +from .errors import ConfigurationError +from .environment import EnvEntry diff --git a/trains/backend_config/bucket_config.py b/trains/backend_config/bucket_config.py new file mode 100644 index 00000000..3a855772 --- /dev/null +++ b/trains/backend_config/bucket_config.py @@ -0,0 +1,291 @@ +import abc +import warnings +from operator import itemgetter + +import furl +import six +from attr import attrib, attrs + + +def _none_to_empty_string(maybe_string): + return maybe_string if maybe_string is not None else "" + + +def _url_stripper(bucket): + bucket = _none_to_empty_string(bucket) + bucket = bucket.strip("\"'").rstrip("/") + return bucket + + +@attrs +class S3BucketConfig(object): + bucket = attrib(type=str, converter=_url_stripper, default="") + host = attrib(type=str, converter=_none_to_empty_string, default="") + key = attrib(type=str, converter=_none_to_empty_string, default="") + secret = attrib(type=str, converter=_none_to_empty_string, default="") + multipart = attrib(type=bool, default=True) + acl = attrib(type=str, converter=_none_to_empty_string, default="") + secure = attrib(type=bool, default=True) + region = attrib(type=str, converter=_none_to_empty_string, default="") + + def update(self, key, secret, multipart=True, region=None): + self.key = key + self.secret = secret + self.multipart = multipart + self.region = region + + def is_valid(self): + return self.key and self.secret + + def get_bucket_host(self): + return self.bucket, self.host + + @classmethod + def from_list(cls, dict_list, log=None): + if not isinstance(dict_list, (tuple, list)) or not all( + isinstance(x, dict) for x in dict_list + ): + raise ValueError("Expecting a list of configurations dictionaries") + configs = [cls(**entry) for entry in dict_list] + valid_configs = [conf for conf in configs if 
conf.is_valid()] + if log and len(valid_configs) < len(configs): + log.warn( + "Invalid bucket configurations detected for {}".format( + ", ".join( + "/".join((config.host, config.bucket)) + for config in configs + if config not in valid_configs + ) + ) + ) + return configs + + +BucketConfig = S3BucketConfig + + +@six.add_metaclass(abc.ABCMeta) +class BaseBucketConfigurations(object): + def __init__(self, buckets=None, *_, **__): + self._buckets = buckets or [] + self._prefixes = None + + def _update_prefixes(self, refresh=True): + if self._prefixes and not refresh: + return + prefixes = ( + (config, self._get_prefix_from_bucket_config(config)) + for config in self._buckets + ) + self._prefixes = sorted(prefixes, key=itemgetter(1), reverse=True) + + @abc.abstractmethod + def _get_prefix_from_bucket_config(self, config): + pass + + +class S3BucketConfigurations(BaseBucketConfigurations): + def __init__( + self, buckets=None, default_key="", default_secret="", default_region="" + ): + super(S3BucketConfigurations, self).__init__() + self._buckets = buckets if buckets else list() + self._default_key = default_key + self._default_secret = default_secret + self._default_region = default_region + self._default_multipart = True + + @classmethod + def from_config(cls, s3_configuration): + config_list = S3BucketConfig.from_list( + s3_configuration.get("credentials", default=None) + ) + + default_key = s3_configuration.get("key", default="") + default_secret = s3_configuration.get("secret", default="") + default_region = s3_configuration.get("region", default="") + + default_key = _none_to_empty_string(default_key) + default_secret = _none_to_empty_string(default_secret) + default_region = _none_to_empty_string(default_region) + + return cls(config_list, default_key, default_secret, default_region) + + def add_config(self, bucket_config): + self._buckets.insert(0, bucket_config) + self._prefixes = None + + def remove_config(self, bucket_config): + self._buckets.remove(bucket_config) + self._prefixes = None + + def get_config_by_bucket(self, bucket, host=None): + try: + return next( + bucket_config + for bucket_config in self._buckets + if (bucket, host) == bucket_config.get_bucket_host() + ) + except StopIteration: + pass + + return None + + def update_config_with_defaults(self, bucket_config): + bucket_config.update( + key=self._default_key, + secret=self._default_secret, + region=bucket_config.region or self._default_region, + multipart=bucket_config.multipart or self._default_multipart, + ) + + def _get_prefix_from_bucket_config(self, config): + scheme = "s3" + prefix = furl.furl() + + if config.host: + prefix.set( + scheme=scheme, + netloc=config.host.lower(), + path=config.bucket.lower() if config.bucket else "", + ) + else: + prefix.set(scheme=scheme, path=config.bucket.lower()) + bucket = prefix.path.segments[0] + prefix.path.segments.pop(0) + prefix.set(netloc=bucket) + + return str(prefix) + + def get_config_by_uri(self, uri): + """ + Get the credentials for an AWS S3 bucket from the config + :param uri: URI of bucket, directory or file + :return: bucket config + :rtype: S3BucketConfig + """ + + def find_match(uri): + self._update_prefixes(refresh=False) + uri = uri.lower() + res = ( + config + for config, prefix in self._prefixes + if prefix is not None and uri.startswith(prefix) + ) + + try: + return next(res) + except StopIteration: + return None + + match = find_match(uri) + + if match: + return match + + parsed = furl.furl(uri) + + if parsed.port: + host = parsed.netloc + parts = 
parsed.path.segments + bucket = parts[0] if parts else None + else: + host = None + bucket = parsed.netloc + + return S3BucketConfig( + key=self._default_key, + secret=self._default_secret, + region=self._default_region, + multipart=True, + bucket=bucket, + host=host, + ) + + +BucketConfigurations = S3BucketConfigurations + + +@attrs +class GSBucketConfig(object): + bucket = attrib(type=str) + subdir = attrib(type=str, converter=_url_stripper, default="") + project = attrib(type=str, default=None) + credentials_json = attrib(type=str, default=None) + + def update(self, **kwargs): + for item in kwargs: + if not hasattr(self, item): + warnings.warn("Unexpected argument {} for update. Ignored".format(item)) + else: + setattr(self, item, kwargs[item]) + + +class GSBucketConfigurations(BaseBucketConfigurations): + def __init__(self, buckets=None, default_project=None, default_credentials=None): + super(GSBucketConfigurations, self).__init__(buckets) + self._default_project = default_project + self._default_credentials = default_credentials + + self._update_prefixes() + + @classmethod + def from_config(cls, gs_configuration): + if gs_configuration is None: + return cls() + + config_list = gs_configuration.get("credentials", default=list()) + buckets_configs = [GSBucketConfig(**entry) for entry in config_list] + + default_project = gs_configuration.get("project", default=None) + default_credentials = gs_configuration.get("credentials_json", default=None) + + return cls(buckets_configs, default_project, default_credentials) + + def add_config(self, bucket_config): + self._buckets.insert(0, bucket_config) + self._update_prefixes() + + def remove_config(self, bucket_config): + self._buckets.remove(bucket_config) + self._update_prefixes() + + def update_config_with_defaults(self, bucket_config): + bucket_config.update( + project=bucket_config.project or self._default_project, + credentials_json=bucket_config.credentials_json + or self._default_credentials, + ) + + def get_config_by_uri(self, uri): + """ + Get the credentials for a Google Storage bucket from the config + :param uri: URI of bucket, directory or file + :return: bucket config + :rtype: GSBucketConfig + """ + + res = ( + config + for config, prefix in self._prefixes + if prefix is not None and uri.lower().startswith(prefix) + ) + + try: + return next(res) + except StopIteration: + pass + + parsed = furl.furl(uri) + + return GSBucketConfig( + bucket=parsed.netloc, + subdir=str(parsed.path), + project=self._default_project, + credentials_json=self._default_credentials, + ) + + def _get_prefix_from_bucket_config(self, config): + prefix = furl.furl(scheme="gs", netloc=config.bucket, path=config.subdir) + return str(prefix) diff --git a/trains/backend_config/config.py b/trains/backend_config/config.py new file mode 100644 index 00000000..409caaa9 --- /dev/null +++ b/trains/backend_config/config.py @@ -0,0 +1,412 @@ +from __future__ import print_function + +import functools +import json +import os +import sys +import warnings +from fnmatch import fnmatch +from logging import Logger +from os.path import expanduser +from typing import Any, Text + +import pyhocon +import six +from pathlib2 import Path +from pyhocon import ConfigTree +from pyparsing import ( + ParseFatalException, + ParseException, + RecursiveGrammarException, + ParseSyntaxException, +) +from six.moves.urllib.parse import urlparse +from watchdog.observers import Observer + +from .bucket_config import S3BucketConfig +from .defs import ( + Environment, + DEFAULT_CONFIG_FOLDER, + 
LOCAL_CONFIG_PATHS, + ENV_CONFIG_PATHS, + LOCAL_CONFIG_FILES, + LOCAL_CONFIG_FILE_OVERRIDE_VAR, + ENV_CONFIG_PATH_OVERRIDE_VAR, +) +from .defs import is_config_file +from .entry import Entry, NotSet +from .errors import ConfigurationError +from .log import initialize as initialize_log, logger +from .reloader import ConfigReloader +from .utils import get_options + +log = logger(__file__) + + +class ConfigEntry(Entry): + logger = None + + def __init__(self, config, *keys, **kwargs): + # type: (Config, Text, Any) -> None + super(ConfigEntry, self).__init__(*keys, **kwargs) + self.config = config + + def _get(self, key): + # type: (Text) -> Any + return self.config.get(key, NotSet) + + def error(self, message): + # type: (Text) -> None + log.error(message.capitalize()) + + +class Config(object): + """ + Represents a server configuration. + If watch=True, will watch configuration folders for changes and reload itself. + NOTE: will not watch folders that were created after initialization. + """ + + # used in place of None in Config.get as default value because None is a valid value + _MISSING = object() + + def __init__( + self, + config_folder=None, + env=None, + verbose=True, + relative_to=None, + app=None, + watch=False, + is_server=False, + **_ + ): + self._app = app + self._verbose = verbose + self._folder_name = config_folder or DEFAULT_CONFIG_FOLDER + self._roots = [] + self._config = ConfigTree() + self._env = env or os.environ.get("TRAINS_ENV", Environment.default) + self.config_paths = set() + self.watch = watch + self.is_server = is_server + if watch: + self.observer = Observer() + self.observer.start() + self.handler = ConfigReloader(self) + + if self._verbose: + print("Config env:%s" % str(self._env)) + + if not self._env: + raise ValueError( + "Missing environment in either init of environment variable" + ) + if self._env not in get_options(Environment): + raise ValueError("Invalid environment %s" % env) + if relative_to is not None: + self.load_relative_to(relative_to) + + @property + def root(self): + return self.roots[0] if self.roots else None + + @property + def roots(self): + return self._roots + + @roots.setter + def roots(self, value): + self._roots = value + + @property + def env(self): + return self._env + + def logger(self, path=None): + return logger(path) + + def load_relative_to(self, *module_paths): + def normalize(p): + return Path(os.path.abspath(str(p))).with_name(self._folder_name) + + self.roots = list(map(normalize, module_paths)) + self.reload() + if self.watch: + for path in self.config_paths: + self.observer.schedule(self.handler, str(path), recursive=True) + + def _reload(self): + env = self._env + config = self._config.copy() + + if self.is_server: + env_config_paths = ENV_CONFIG_PATHS + else: + env_config_paths = [] + + env_config_path_override = os.environ.get(ENV_CONFIG_PATH_OVERRIDE_VAR) + if env_config_path_override: + env_config_paths = [expanduser(env_config_path_override)] + + # merge configuration from root and other environment config paths + config = functools.reduce( + lambda cfg, path: ConfigTree.merge_configs( + cfg, + self._read_recursive_for_env(path, env, verbose=self._verbose), + copy_trees=True, + ), + self.roots + env_config_paths, + config, + ) + + # merge configuration from local configuration paths + config = functools.reduce( + lambda cfg, path: ConfigTree.merge_configs( + cfg, self._read_recursive(path, verbose=self._verbose), copy_trees=True + ), + LOCAL_CONFIG_PATHS, + config, + ) + + local_config_files = LOCAL_CONFIG_FILES + 
local_config_override = os.environ.get(LOCAL_CONFIG_FILE_OVERRIDE_VAR) + if local_config_override: + local_config_files = [expanduser(local_config_override)] + + # merge configuration from local configuration files + config = functools.reduce( + lambda cfg, file_path: ConfigTree.merge_configs( + cfg, + self._read_single_file(file_path, verbose=self._verbose), + copy_trees=True, + ), + local_config_files, + config, + ) + + config["env"] = env + return config + + def replace(self, config): + self._config = config + + def reload(self): + self.replace(self._reload()) + + def initialize_logging(self): + logging_config = self._config.get("logging", None) + if not logging_config: + return False + + # handle incomplete file handlers + deleted = [] + handlers = logging_config.get("handlers", {}) + for name, handler in list(handlers.items()): + cls = handler.get("class", None) + is_file = cls and "FileHandler" in cls + if cls is None or (is_file and "filename" not in handler): + deleted.append(name) + del handlers[name] + elif is_file: + file = Path(handler.get("filename")) + if not file.is_file(): + file.parent.mkdir(parents=True, exist_ok=True) + file.touch() + + # remove dependency in deleted handlers + root_logger = logging_config.get("root", None) + loggers = list(logging_config.get("loggers", {}).values()) + ( + [root_logger] if root_logger else [] + ) + for logger in loggers: + handlers = logger.get("handlers", None) + if not handlers: + continue + logger["handlers"] = [h for h in handlers if h not in deleted] + + extra = None + if self._app: + extra = {"app": self._app} + initialize_log(logging_config, extra=extra) + return True + + def __getitem__(self, key): + return self._config[key] + + def get(self, key, default=_MISSING): + value = self._config.get(key, default) + if value is self._MISSING and not default: + raise KeyError( + "Unable to find value for key '{}' and default value was not provided.".format( + key + ) + ) + return value + + def to_dict(self): + return self._config.as_plain_ordered_dict() + + def as_json(self): + return json.dumps(self.to_dict(), indent=2) + + def _read_recursive_for_env(self, root_path_str, env, verbose=True): + root_path = Path(root_path_str) + if root_path.exists(): + default_config = self._read_recursive( + root_path / Environment.default, verbose=verbose + ) + env_config = self._read_recursive( + root_path / env, verbose=verbose + ) # None is ok, will return empty config + config = ConfigTree.merge_configs(default_config, env_config, True) + else: + config = ConfigTree() + + return config + + def _read_recursive(self, conf_root, verbose=True): + conf = ConfigTree() + if not conf_root: + return conf + conf_root = Path(conf_root) + + if not conf_root.exists(): + if verbose: + print("No config in %s" % str(conf_root)) + return conf + + if self.watch: + self.config_paths.add(conf_root) + + if verbose: + print("Loading config from %s" % str(conf_root)) + for root, dirs, files in os.walk(str(conf_root)): + + rel_dir = str(Path(root).relative_to(conf_root)) + if rel_dir == ".": + rel_dir = "" + prefix = rel_dir.replace("/", ".") + + for filename in files: + if not is_config_file(filename): + continue + + if prefix != "": + key = prefix + "." 
+ Path(filename).stem + else: + key = Path(filename).stem + + file_path = str(Path(root) / filename) + + conf.put(key, self._read_single_file(file_path, verbose=verbose)) + + return conf + + @staticmethod + def _read_single_file(file_path, verbose=True): + if not file_path or not Path(file_path).is_file(): + return ConfigTree() + + if verbose: + print("Loading config from file %s" % file_path) + + try: + return pyhocon.ConfigFactory.parse_file(file_path) + except ParseSyntaxException as ex: + msg = "Failed parsing {0} ({1.__class__.__name__}): (at char {1.loc}, line:{1.lineno}, col:{1.column})".format( + file_path, ex + ) + six.reraise( + ConfigurationError, + ConfigurationError(msg, file_path=file_path), + sys.exc_info()[2], + ) + except (ParseException, ParseFatalException, RecursiveGrammarException) as ex: + msg = "Failed parsing {0} ({1.__class__.__name__}): {1}".format( + file_path, ex + ) + six.reraise(ConfigurationError, ConfigurationError(msg), sys.exc_info()[2]) + except Exception as ex: + print("Failed loading %s: %s" % (file_path, ex)) + raise + + def get_config_for_bucket(self, base_url, extra_configurations=None): + """ + Get the credentials for an AWS S3 bucket from the config + :param base_url: URL of bucket + :param extra_configurations: + :return: bucket config + :rtype: bucket config + """ + + warnings.warn( + "Use backend_config.bucket_config.BucketList.get_config_for_uri", + DeprecationWarning, + ) + configs = S3BucketConfig.from_list(self.get("sdk.aws.s3.credentials", [])) + if extra_configurations: + configs.extend(extra_configurations) + + def find_match(host=None, bucket=None): + if not host and not bucket: + raise ValueError("host or bucket required") + try: + if host: + res = { + config + for config in configs + if (config.host and fnmatch(host, config.host)) + and ( + not bucket + or not config.bucket + or fnmatch(bucket.lower(), config.bucket.lower()) + ) + } + else: + res = { + config + for config in configs + if config.bucket + and fnmatch(bucket.lower(), config.bucket.lower()) + } + return next(iter(res)) + except StopIteration: + pass + + parsed = urlparse(base_url) + parts = Path(parsed.path.strip("/")).parts + if parsed.netloc: + # We have a netloc (either an actual hostname or an AWS bucket name). 
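+            # For example (illustrative URLs):
+            #   s3://my-host:9000/my-bucket/data -> netloc is a host, first path part is the bucket
+            #   s3://my-bucket/data              -> netloc is the bucket itself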
+ # First, we'll try with the netloc as host, but if we don't find anything, we'll try without a host and + # with the netloc as the bucket name + match = None + if parts: + # try host/bucket only if path parts contain any element + match = find_match(host=parsed.netloc, bucket=parts[0]) + if not match: + # no path parts or no config found for host/bucket, try netloc as bucket + match = find_match(bucket=parsed.netloc) + else: + # No netloc, so we'll simply search by bucket + match = find_match(bucket=parts[0]) + + if match: + return match + + non_aws_s3_host_suffix = ":9000" + if parsed.netloc.endswith(non_aws_s3_host_suffix): + host = parsed.netloc + bucket = parts[0] if parts else None + else: + host = None + bucket = parsed.netloc + + return S3BucketConfig( + key=self.get("sdk.aws.s3.key", None), + secret=self.get("sdk.aws.s3.secret", None), + region=self.get("sdk.aws.s3.region", None), + multipart=True, + bucket=bucket, + host=host, + ) diff --git a/trains/backend_config/converters.py b/trains/backend_config/converters.py new file mode 100644 index 00000000..96bc574b --- /dev/null +++ b/trains/backend_config/converters.py @@ -0,0 +1,46 @@ +import base64 +from distutils.util import strtobool +from typing import Union, Optional, Text, Any, TypeVar, Callable, Tuple + +import six + +ConverterType = TypeVar("ConverterType", bound=Callable[[Any], Any]) + + +def base64_to_text(value): + # type: (Any) -> Text + return base64.b64decode(value).decode("utf-8") + + +def text_to_bool(value): + # type: (Text) -> bool + return bool(strtobool(value)) + + +def any_to_bool(value): + # type: (Optional[Union[int, float, Text]]) -> bool + if isinstance(value, six.text_type): + return text_to_bool(value) + return bool(value) + + +def or_(*converters, **kwargs): + # type: (ConverterType, Tuple[Exception, ...]) -> ConverterType + """ + Wrapper that implements an "optional converter" pattern. Allows specifying a converter + for which a set of exceptions is ignored (and the original value is returned) + :param converter: A converter callable + :param exceptions: A tuple of exception types to ignore + """ + # noinspection PyUnresolvedReferences + exceptions = kwargs.get("exceptions", (ValueError, TypeError)) + + def wrapper(value): + for converter in converters: + try: + return converter(value) + except exceptions: + pass + return value + + return wrapper diff --git a/trains/backend_config/defs.py b/trains/backend_config/defs.py new file mode 100644 index 00000000..c31ffc60 --- /dev/null +++ b/trains/backend_config/defs.py @@ -0,0 +1,53 @@ +from os.path import expanduser +from pathlib2 import Path + +ENV_VAR = 'TRAINS_ENV' +""" Name of system environment variable that can be used to specify the config environment name """ + + +DEFAULT_CONFIG_FOLDER = 'config' +""" Default config folder to search for when loading relative to a given path """ + + +ENV_CONFIG_PATHS = [ +] + + +""" Environment-related config paths """ + + +LOCAL_CONFIG_PATHS = [ + '/etc/opt/trains', # used by servers for docker-generated configuration + expanduser('~/.trains/config'), +] +""" Local config paths, not related to environment """ + + +LOCAL_CONFIG_FILES = [ + expanduser('~/trains.conf'), # used for workstation configuration (end-users, workers) +] +""" Local config files (not paths) """ + + +LOCAL_CONFIG_FILE_OVERRIDE_VAR = 'TRAINS_CONFIG_FILE' +""" Local config file override environment variable. If this is set, no other local config files will be used. 
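+For example (path is illustrative): TRAINS_CONFIG_FILE=/path/to/custom_trains.conf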
""" + + +ENV_CONFIG_PATH_OVERRIDE_VAR = 'TRAINS_CONFIG_PATH' +""" +Environment-related config path override environment variable. If this is set, no other env config path will be used. +""" + + +class Environment(object): + """ Supported environment names """ + default = 'default' + demo = 'demo' + local = 'local' + + +CONFIG_FILE_EXTENSION = '.conf' + + +def is_config_file(path): + return Path(path).suffix == CONFIG_FILE_EXTENSION diff --git a/trains/backend_config/entry.py b/trains/backend_config/entry.py new file mode 100644 index 00000000..2b175af4 --- /dev/null +++ b/trains/backend_config/entry.py @@ -0,0 +1,96 @@ +import abc +from typing import Optional, Any, Tuple, Text, Callable, Dict + +import six + +from .converters import any_to_bool + +NotSet = object() + +Converter = Callable[[Any], Any] + + +@six.add_metaclass(abc.ABCMeta) +class Entry(object): + """ + Configuration entry definition + """ + + @classmethod + def default_conversions(cls): + # type: () -> Dict[Any, Converter] + return { + bool: any_to_bool, + six.text_type: lambda s: six.text_type(s).strip(), + } + + def __init__(self, key, *more_keys, **kwargs): + # type: (Text, Text, Any) -> None + """ + :param key: Entry's key (at least one). + :param more_keys: More alternate keys for this entry. + :param type: Value type. If provided, will be used choosing a default conversion or + (if none exists) for casting the environment value. + :param converter: Value converter. If provided, will be used to convert the environment value. + :param default: Default value. If provided, will be used as the default value on calls to get() and get_pair() + in case no value is found for any key and no specific default value was provided in the call. + Default value is None. + :param help: Help text describing this entry + """ + self.keys = (key,) + more_keys + self.type = kwargs.pop("type", six.text_type) + self.converter = kwargs.pop("converter", None) + self.default = kwargs.pop("default", None) + self.help = kwargs.pop("help", None) + + def __str__(self): + return str(self.key) + + @property + def key(self): + return self.keys[0] + + def convert(self, value, converter=None): + # type: (Any, Converter) -> Optional[Any] + converter = converter or self.converter + if not converter: + converter = self.default_conversions().get(self.type, self.type) + return converter(value) + + def get_pair(self, default=NotSet, converter=None): + # type: (Any, Converter) -> Optional[Tuple[Text, Any]] + for key in self.keys: + value = self._get(key) + if value is NotSet: + continue + try: + value = self.convert(value, converter) + except Exception as ex: + self.error("invalid value {key}={value}: {ex}".format(**locals())) + break + return key, value + result = self.default if default is NotSet else default + return self.key, result + + def get(self, default=NotSet, converter=None): + # type: (Any, Converter) -> Optional[Any] + return self.get_pair(default=default, converter=converter)[1] + + def set(self, value): + # type: (Any, Any) -> (Text, Any) + key, _ = self.get_pair(default=None, converter=None) + self._set(key, str(value)) + + def _set(self, key, value): + # type: (Text, Text) -> None + pass + + @abc.abstractmethod + def _get(self, key): + # type: (Text) -> Any + pass + + @abc.abstractmethod + def error(self, message): + # type: (Text) -> None + pass diff --git a/trains/backend_config/environment.py b/trains/backend_config/environment.py new file mode 100644 index 00000000..30ca80ba --- /dev/null +++ b/trains/backend_config/environment.py @@ -0,0 
+1,25 @@ +from os import getenv, environ + +from .converters import text_to_bool +from .entry import Entry, NotSet + + +class EnvEntry(Entry): + @classmethod + def default_conversions(cls): + conversions = super(EnvEntry, cls).default_conversions().copy() + conversions[bool] = text_to_bool + return conversions + + def _get(self, key): + value = getenv(key, "").strip() + return value or NotSet + + def _set(self, key, value): + environ[key] = value + + def __str__(self): + return "env:{}".format(super(EnvEntry, self).__str__()) + + def error(self, message): + print("Environment configuration: {}".format(message)) diff --git a/trains/backend_config/errors.py b/trains/backend_config/errors.py new file mode 100644 index 00000000..85acc6c1 --- /dev/null +++ b/trains/backend_config/errors.py @@ -0,0 +1,5 @@ +class ConfigurationError(Exception): + + def __init__(self, msg, file_path=None, *args): + super(ConfigurationError, self).__init__(msg, *args) + self.file_path = file_path diff --git a/trains/backend_config/log.py b/trains/backend_config/log.py new file mode 100644 index 00000000..97b373c9 --- /dev/null +++ b/trains/backend_config/log.py @@ -0,0 +1,30 @@ +import logging.config + +from pathlib2 import Path + + +def logger(path=None): + name = "trains" + if path: + p = Path(path) + module = (p.parent if p.stem.startswith('_') else p).stem + name = "trains.%s" % module + return logging.getLogger(name) + + +def initialize(logging_config=None, extra=None): + if extra is not None: + from logging import Logger + + class _Logger(Logger): + __extra = extra.copy() + + def _log(self, level, msg, args, exc_info=None, extra=None, **kwargs): + extra = extra or {} + extra.update(self.__extra) + super(_Logger, self)._log(level, msg, args, exc_info=exc_info, extra=extra, **kwargs) + + Logger.manager.loggerClass = _Logger + + if logging_config is not None: + logging.config.dictConfig(dict(logging_config)) diff --git a/trains/backend_config/reloader.py b/trains/backend_config/reloader.py new file mode 100644 index 00000000..0e2a621d --- /dev/null +++ b/trains/backend_config/reloader.py @@ -0,0 +1,32 @@ +import logging + +from watchdog.events import FileSystemEventHandler, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, \ + FileMovedEvent + +from .defs import is_config_file +from .log import logger + + +log = logger(__file__) +log.setLevel(logging.DEBUG) + + +class ConfigReloader(FileSystemEventHandler): + + def __init__(self, config): + self.config = config + + def reload(self): + try: + self.config.reload() + except Exception as ex: + log.warning('failed loading configuration: %s: %s', type(ex), ex) + + def on_any_event(self, event): + if not ( + is_config_file(event.src_path) and + isinstance(event, (FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent)) + ): + return + log.debug('reloading configuration - triggered by %s', event) + self.reload() diff --git a/trains/backend_config/utils.py b/trains/backend_config/utils.py new file mode 100644 index 00000000..f5a29230 --- /dev/null +++ b/trains/backend_config/utils.py @@ -0,0 +1,9 @@ + +def get_items(cls): + """ get key/value items from an enum-like class (members represent enumeration key/value) """ + return {k: v for k, v in vars(cls).items() if not k.startswith('_')} + + +def get_options(cls): + """ get options from an enum-like class (members represent enumeration key/value) """ + return get_items(cls).values() diff --git a/trains/backend_interface/__init__.py b/trains/backend_interface/__init__.py new file mode 100644 index 
00000000..216ed683 --- /dev/null +++ b/trains/backend_interface/__init__.py @@ -0,0 +1,2 @@ +""" High-level abstractions for backend API """ +from .task import Task, TaskStatusEnum, TaskEntry diff --git a/trains/backend_interface/base.py b/trains/backend_interface/base.py new file mode 100644 index 00000000..1a16f9d3 --- /dev/null +++ b/trains/backend_interface/base.py @@ -0,0 +1,147 @@ +import abc + +import requests.exceptions +import six +from ..backend_api import Session +from ..backend_api.session import BatchRequest + +from ..config import config_obj +from ..config.defs import LOG_LEVEL_ENV_VAR, API_ACCESS_KEY, API_SECRET_KEY +from ..debugging import get_logger +from ..backend_api.version import __version__ +from .session import SendError, SessionInterface + + +class InterfaceBase(SessionInterface): + """ Base class for a backend manager class """ + _default_session = None + + @property + def session(self): + return self._session + + @property + def log(self): + return self._log + + def __init__(self, session=None, log=None, **kwargs): + super(InterfaceBase, self).__init__() + self._session = session or self._get_default_session() + self._log = log or self._create_log() + + def _create_log(self): + log = get_logger(str(self.__class__.__name__)) + try: + log.setLevel(LOG_LEVEL_ENV_VAR.get(default=log.level)) + except TypeError as ex: + raise ValueError('Invalid log level defined in environment variable `%s`: %s' % (LOG_LEVEL_ENV_VAR, ex)) + return log + + @classmethod + def _send(cls, session, req, ignore_errors=False, raise_on_errors=True, log=None, async_enable=False): + """ Convenience send() method providing a standardized error reporting """ + while True: + try: + res = session.send(req, async_enable=async_enable) + if res.meta.result_code in (200, 202) or ignore_errors: + return res + + if isinstance(req, BatchRequest): + error_msg = 'Action failed %s' % res.meta + else: + error_msg = 'Action failed %s (%s)' \ + % (res.meta, ', '.join('%s=%s' % p for p in req.to_dict().items())) + if log: + log.error(error_msg) + + if res.meta.result_code <= 500: + # Proper backend error/bad status code - raise or return + if raise_on_errors: + raise SendError(res, error_msg) + return res + + except requests.exceptions.BaseHTTPError as e: + log.error('failed sending %s: %s' % (str(req), str(e))) + + # Infrastructure error + if log: + log.info('retrying request %s' % str(req)) + + def send(self, req, ignore_errors=False, raise_on_errors=True, async_enable=False): + return self._send(session=self.session, req=req, ignore_errors=ignore_errors, raise_on_errors=raise_on_errors, + log=self.log, async_enable=async_enable) + + @classmethod + def _get_default_session(cls): + if not InterfaceBase._default_session: + InterfaceBase._default_session = Session( + initialize_logging=False, + client='sdk-%s' % __version__, + config=config_obj, + api_key=API_ACCESS_KEY.get(), + secret_key=API_SECRET_KEY.get(), + ) + return InterfaceBase._default_session + + @classmethod + def _set_default_session(cls, session): + """ + Set a new default session to the system + + Warning: Use only for debug and testing + :param session: The new default session + """ + + InterfaceBase._default_session = session + + @property + def default_session(self): + if hasattr(self, '_session'): + return self._session + return self._get_default_session() + + +@six.add_metaclass(abc.ABCMeta) +class IdObjectBase(InterfaceBase): + + def __init__(self, id, session=None, log=None, **kwargs): + super(IdObjectBase, self).__init__(session, log, 
**kwargs) + self._data = None + self._id = None + self.id = self.normalize_id(id) + + @property + def id(self): + return self._id + + @id.setter + def id(self, value): + should_reload = value is not None and value != self._id + self._id = value + if should_reload: + self.reload() + + @property + def data(self): + if self._data is None: + self.reload() + return self._data + + @abc.abstractmethod + def _reload(self): + pass + + def reload(self): + if not self.id: + raise ValueError('Failed reloading %s: missing id' % type(self).__name__) + self._data = self._reload() + + @classmethod + def normalize_id(cls, id): + return id.strip() if id else None + + @classmethod + def resolve_id(cls, obj): + if isinstance(obj, cls): + return obj.id + return obj diff --git a/trains/backend_interface/metrics/__init__.py b/trains/backend_interface/metrics/__init__.py new file mode 100644 index 00000000..2f0fba05 --- /dev/null +++ b/trains/backend_interface/metrics/__init__.py @@ -0,0 +1,4 @@ +""" Metrics management and batching support """ +from .interface import Metrics +from .reporter import Reporter +from .events import ScalarEvent, VectorEvent, PlotEvent, ImageEvent diff --git a/trains/backend_interface/metrics/events.py b/trains/backend_interface/metrics/events.py new file mode 100644 index 00000000..896540f8 --- /dev/null +++ b/trains/backend_interface/metrics/events.py @@ -0,0 +1,258 @@ +import abc +import time +from threading import Lock + +import attr +import cv2 +import numpy as np +import pathlib2 +import six +from ...backend_api.services import events +from six.moves.urllib.parse import urlparse, urlunparse + +from ...config import config + + +@six.add_metaclass(abc.ABCMeta) +class MetricsEventAdapter(object): + """ + Adapter providing all the base attributes required by a metrics event and defining an interface used by the + metrics manager when batching and writing events. + """ + + _default_nan_value = 0. 
+ """ Default value used when a np.nan value is encountered """ + + @attr.attrs(cmp=False, slots=True) + class FileEntry(object): + """ File entry used to report on file data that needs to be uploaded prior to sending the event """ + + event = attr.attrib() + + name = attr.attrib() + """ File name """ + + stream = attr.attrib() + """ File-like object containing the file's data """ + + url_prop = attr.attrib() + """ Property name that should be updated with the uploaded url """ + + key_prop = attr.attrib() + + upload_uri = attr.attrib() + + url = attr.attrib(default=None) + + exception = attr.attrib(default=None) + + def set_exception(self, exp): + self.exception = exp + self.event.upload_exception = exp + + @property + def metric(self): + return self._metric + + @metric.setter + def metric(self, value): + self._metric = value + + @property + def variant(self): + return self._variant + + def __init__(self, metric, variant, iter=None, timestamp=None, task=None, gen_timestamp_if_none=True): + if not timestamp and gen_timestamp_if_none: + timestamp = int(time.time() * 1000) + self._metric = metric + self._variant = variant + self._iter = iter + self._timestamp = timestamp + self._task = task + + # Try creating an event just to trigger validation + _ = self.get_api_event() + self.upload_exception = None + + @abc.abstractmethod + def get_api_event(self): + """ Get an API event instance """ + pass + + def get_file_entry(self): + """ Get information for a file that should be uploaded before this event is sent """ + pass + + def update(self, task=None, **kwargs): + """ Update event properties """ + if task: + self._task = task + + def _get_base_dict(self): + """ Get a dict with the base attributes """ + res = dict( + task=self._task, + timestamp=self._timestamp, + metric=self._metric, + variant=self._variant + ) + if self._iter is not None: + res.update(iter=self._iter) + return res + + @classmethod + def _convert_np_nan(cls, val): + if np.isnan(val) or np.isinf(val): + return cls._default_nan_value + return val + + +class ScalarEvent(MetricsEventAdapter): + """ Scalar event adapter """ + + def __init__(self, metric, variant, value, iter, **kwargs): + self._value = self._convert_np_nan(value) + super(ScalarEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) + + def get_api_event(self): + return events.MetricsScalarEvent( + value=self._value, + **self._get_base_dict()) + + +class VectorEvent(MetricsEventAdapter): + """ Vector event adapter """ + + def __init__(self, metric, variant, values, iter, **kwargs): + self._values = [self._convert_np_nan(v) for v in values] + super(VectorEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) + + def get_api_event(self): + return events.MetricsVectorEvent( + values=self._values, + **self._get_base_dict()) + + +class PlotEvent(MetricsEventAdapter): + """ Plot event adapter """ + + def __init__(self, metric, variant, plot_str, iter=None, **kwargs): + self._plot_str = plot_str + super(PlotEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) + + def get_api_event(self): + return events.MetricsPlotEvent( + plot_str=self._plot_str, + **self._get_base_dict()) + + +class ImageEventNoUpload(MetricsEventAdapter): + + def __init__(self, metric, variant, src, iter=0, **kwargs): + parts = urlparse(src) + self._url = urlunparse((parts.scheme, parts.netloc, '', '', '', '')) + self._key = urlunparse(('', '', parts.path, parts.params, parts.query, parts.fragment)) + super(ImageEventNoUpload, 
self).__init__(metric, variant, iter=iter, **kwargs) + + def get_api_event(self): + return events.MetricsImageEvent( + url=self._url, + key=self._key, + **self._get_base_dict()) + + +class ImageEvent(MetricsEventAdapter): + """ Image event adapter """ + _format = '.' + str(config.get('metrics.images.format', 'JPEG')).upper().lstrip('.') + _quality = int(config.get('metrics.images.quality', 87)) + _subsampling = int(config.get('metrics.images.subsampling', 0)) + + _metric_counters = {} + _metric_counters_lock = Lock() + _image_file_history_size = int(config.get('metrics.file_history_size', 5)) + + def __init__(self, metric, variant, image_data, iter=0, upload_uri=None, + image_file_history_size=None, **kwargs): + if not hasattr(image_data, 'shape'): + raise ValueError('Image must have a shape attribute') + self._image_data = image_data + self._url = None + self._key = None + self._count = self._get_metric_count(metric, variant) + if not image_file_history_size: + image_file_history_size = self._image_file_history_size + if image_file_history_size < 1: + self._filename = '%s_%s_%08d' % (metric, variant, self._count) + else: + self._filename = '%s_%s_%08d' % (metric, variant, self._count % image_file_history_size) + self._upload_uri = upload_uri + super(ImageEvent, self).__init__(metric, variant, iter=iter, **kwargs) + + @classmethod + def _get_metric_count(cls, metric, variant, next=True): + """ Returns the next count number for the given metric/variant (rotates every few calls) """ + counters = cls._metric_counters + key = '%s_%s' % (metric, variant) + try: + cls._metric_counters_lock.acquire() + value = counters.get(key, -1) + if next: + value = counters[key] = value + 1 + return value + finally: + cls._metric_counters_lock.release() + + def get_api_event(self): + return events.MetricsImageEvent( + url=self._url, + key=self._key, + **self._get_base_dict()) + + def update(self, url=None, key=None, **kwargs): + super(ImageEvent, self).update(**kwargs) + if url is not None: + self._url = url + if key is not None: + self._key = key + + def get_file_entry(self): + # don't provide file in case this event is out of the history window + last_count = self._get_metric_count(self.metric, self.variant, next=False) + if abs(self._count - last_count) > self._image_file_history_size: + output = None + else: + image_data = self._image_data + if not isinstance(image_data, np.ndarray): + # try conversion, if it fails we'll leave it to the user. 
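+ # normalization performed below: cast the input to an ndarray, force at least 3 dims (HxWxC),
+ # scale [0, 1] float data to 0-255 uint8 (otherwise cast to uint8), drop the channel axis for
+ # single-channel images, then encode via cv2.imencode using the configured format/quality and
+ # wrap the encoded bytes in a stream so the file entry can be uploaded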
+ image_data = np.array(image_data) + image_data = np.atleast_3d(image_data) + if image_data.dtype != np.uint8: + if np.issubdtype(image_data.dtype, np.floating) and image_data.max() <= 1.0: + image_data = (image_data*255).astype(np.uint8) + else: + image_data = image_data.astype(np.uint8) + shape = image_data.shape + height, width, channel = shape[:3] + if channel == 1: + image_data = np.reshape(image_data, (height, width)) + + # serialize image + _, img_bytes = cv2.imencode( + self._format, image_data, + params=(cv2.IMWRITE_JPEG_QUALITY, self._quality), + ) + + output = six.BytesIO(img_bytes.tostring()) + output.seek(0) + + filename = str(pathlib2.Path(self._filename).with_suffix(self._format.lower())) + + return self.FileEntry( + event=self, + name=filename, + stream=output, + url_prop='url', + key_prop='key', + upload_uri=self._upload_uri + ) diff --git a/trains/backend_interface/metrics/interface.py b/trains/backend_interface/metrics/interface.py new file mode 100644 index 00000000..13a9b91b --- /dev/null +++ b/trains/backend_interface/metrics/interface.py @@ -0,0 +1,192 @@ +from functools import partial +from multiprocessing.pool import ThreadPool +from threading import Lock +from time import time + +from humanfriendly import format_timespan +from ...backend_api.services import events as api_events +from ..base import InterfaceBase +from ...config import config +from ...debugging import get_logger +from ...storage import StorageHelper + +from .events import MetricsEventAdapter + + +upload_pool = ThreadPool(processes=1) +file_upload_pool = ThreadPool(processes=config.get('network.metrics.file_upload_threads', 4)) + +log = get_logger('metrics') + + +class Metrics(InterfaceBase): + """ Metrics manager and batch writer """ + _storage_lock = Lock() + _file_upload_starvation_warning_sec = config.get('network.metrics.file_upload_starvation_warning_sec', None) + + @property + def storage_key_prefix(self): + return self._storage_key_prefix + + def _get_storage(self, storage_uri=None): + """ Storage helper used to upload files """ + try: + # use a lock since this storage object will be requested by thread pool threads, so we need to make sure + # any singleton initialization will occur only once + self._storage_lock.acquire() + storage_uri = storage_uri or self._storage_uri + return StorageHelper.get(storage_uri) + except Exception as e: + log.error('Failed getting storage helper for %s: %s' % (storage_uri, str(e))) + finally: + self._storage_lock.release() + + def __init__(self, session, task_id, storage_uri, storage_uri_suffix='metrics', log=None): + super(Metrics, self).__init__(session, log=log) + self._task_id = task_id + self._storage_uri = storage_uri.rstrip('/') if storage_uri else None + self._storage_key_prefix = storage_uri_suffix.strip('/') if storage_uri_suffix else None + self._file_related_event_time = None + self._file_upload_time = None + + def write_events(self, events, async_enable=True, callback=None, **kwargs): + """ + Write events to the backend, uploading any required files. + :param events: A list of event objects + :param async_enable: If True, upload is performed asynchronously and an AsyncResult object is returned, otherwise a + blocking call is made and the upload result is returned. + :param callback: An optional callback called when upload was completed in case async is True + :return: .backend_api.session.CallResult if async is False otherwise AsyncResult. Note that if no events were + sent, None will be returned.
+ """ + if not events: + return + + storage_uri = kwargs.pop('storage_uri', self._storage_uri) + + if not async_enable: + return self._do_write_events(events, storage_uri) + + def safe_call(*args, **kwargs): + try: + return self._do_write_events(*args, **kwargs) + except Exception as e: + return e + + return upload_pool.apply_async( + safe_call, + args=(events, storage_uri), + callback=partial(self._callback_wrapper, callback)) + + def _callback_wrapper(self, callback, res): + """ A wrapper for the async callback for handling common errors """ + if not res: + # no result yet + return + elif isinstance(res, Exception): + # error + self.log.error('Error trying to send metrics: %s' % str(res)) + elif not res.ok(): + # bad result, log error + self.log.error('Failed reporting metrics: %s' % str(res.meta)) + # call callback, even if we received an error + if callback: + callback(res) + + def _do_write_events(self, events, storage_uri=None): + """ Sends an iterable of events as a series of batch operations. note: metric send does not raise on error""" + assert isinstance(events, (list, tuple)) + assert all(isinstance(x, MetricsEventAdapter) for x in events) + + # def event_key(ev): + # return (ev.metric, ev.variant) + # + # events = sorted(events, key=event_key) + # multiple_events_for = [k for k, v in groupby(events, key=event_key) if len(list(v)) > 1] + # if multiple_events_for: + # log.warning( + # 'More than one metrics event sent for these metric/variant combinations in a report: %s' % + # ', '.join('%s/%s' % k for k in multiple_events_for)) + + storage_uri = storage_uri or self._storage_uri + + now = time() + + def update_and_get_file_entry(ev): + entry = ev.get_file_entry() + kwargs = {} + if entry: + e_storage_uri = entry.upload_uri or storage_uri + self._file_related_event_time = now + # if we have an entry (with or without a stream), we'll generate the URL and store it in the event + filename = entry.name + key = '/'.join(x for x in (self._storage_key_prefix, ev.metric, ev.variant, filename.strip('/')) if x) + url = '/'.join(x.strip('/') for x in (e_storage_uri, key)) + kwargs[entry.key_prop] = key + kwargs[entry.url_prop] = url + if not entry.stream: + # if entry has no stream, we won't upload it + entry = None + else: + if not hasattr(entry.stream, 'read'): + raise ValueError('Invalid file object %s' % entry.stream) + entry.url = url + ev.update(task=self._task_id, **kwargs) + return entry + + # prepare event needing file upload + entries = [] + for ev in events: + try: + e = update_and_get_file_entry(ev) + if e: + entries.append(e) + except Exception as ex: + log.warning(str(ex)) + + # upload the needed files + if entries: + # upload files + def upload(e): + upload_uri = e.upload_uri or storage_uri + + try: + storage = self._get_storage(upload_uri) + url = storage.upload_from_stream(e.stream, e.url) + e.event.update(url=url) + except Exception as exp: + log.debug("Failed uploading to {} ({})".format( + upload_uri if upload_uri else "(Could not calculate upload uri)", + exp, + )) + + e.set_exception(exp) + + res = file_upload_pool.map_async(upload, entries) + res.wait() + + # remember the last time we uploaded a file + self._file_upload_time = time() + + t_f, t_u, t_ref = \ + (self._file_related_event_time, self._file_upload_time, self._file_upload_starvation_warning_sec) + if t_f and t_u and t_ref and (t_f - t_u) > t_ref: + log.warning('Possible metrics file upload starvation: files were not uploaded for %s' % + format_timespan(t_ref)) + + # send the events in a batched request + 
good_events = [ev for ev in events if ev.upload_exception is None] + error_events = [ev for ev in events if ev.upload_exception is not None] + + if error_events: + log.error("Not uploading {}/{} events because the data upload failed".format( + len(error_events), + len(events), + )) + + if good_events: + batched_requests = [api_events.AddRequest(event=ev.get_api_event()) for ev in good_events] + req = api_events.AddBatchRequest(requests=batched_requests) + return self.send(req, raise_on_errors=False) + + return None diff --git a/trains/backend_interface/metrics/reporter.py b/trains/backend_interface/metrics/reporter.py new file mode 100644 index 00000000..a5a6ade8 --- /dev/null +++ b/trains/backend_interface/metrics/reporter.py @@ -0,0 +1,457 @@ +import collections +import json + +import cv2 +import six + +from ..base import InterfaceBase +from ..setupuploadmixin import SetupUploadMixin +from ...utilities.async_manager import AsyncManagerMixin +from ...utilities.plotly import create_2d_histogram_plot, create_value_matrix, create_3d_surface, \ + create_2d_scatter_series, create_3d_scatter_series, create_line_plot, plotly_scatter3d_layout_dict +from ...utilities.py3_interop import AbstractContextManager +from .events import ScalarEvent, VectorEvent, ImageEvent, PlotEvent, ImageEventNoUpload + + +class Reporter(InterfaceBase, AbstractContextManager, SetupUploadMixin, AsyncManagerMixin): + """ + A simple metrics reporter class. + This class caches reports and supports both explicit flushing and context-based flushing. To ensure reports are + sent to the backend, please use (assuming an instance of Reporter named 'reporter'): + - use the context manager feature (which will automatically flush when exiting the context): + with reporter: + reporter.report... + ... + - explicitly call flush: + reporter.report... + ... + reporter.flush() + """ + + def __init__(self, metrics, flush_threshold=10, async_enable=False): + """ + Create a reporter + :param metrics: A Metrics manager instance that handles actual reporting, uploads etc. + :type metrics: .backend_interface.metrics.Metrics + :param flush_threshold: Events flush threshold. This determines the threshold over which cached reported events + are flushed and sent to the backend.
+ :type flush_threshold: int + """ + log = metrics.log.getChild('reporter') + log.setLevel(log.level) + super(Reporter, self).__init__(session=metrics.session, log=log) + self._metrics = metrics + self._flush_threshold = flush_threshold + self._events = [] + self._bucket_config = None + self._storage_uri = None + self._async_enable = async_enable + + def _set_storage_uri(self, value): + value = '/'.join(x for x in (value.rstrip('/'), self._metrics.storage_key_prefix) if x) + self._storage_uri = value + + storage_uri = property(None, _set_storage_uri) + + @property + def flush_threshold(self): + return self._flush_threshold + + @flush_threshold.setter + def flush_threshold(self, value): + self._flush_threshold = max(0, value) + + @property + def async_enable(self): + return self._async_enable + + @async_enable.setter + def async_enable(self, value): + self._async_enable = bool(value) + + def _report(self, ev): + self._events.append(ev) + if len(self._events) >= self._flush_threshold: + self._write() + + def _write(self): + if not self._events: + return + # print('reporting %d events' % len(self._events)) + res = self._metrics.write_events(self._events, async_enable=self._async_enable, storage_uri=self._storage_uri) + if self._async_enable: + self._add_async_result(res) + self._events = [] + + def flush(self): + """ + Flush cached reports to backend. + """ + self._write() + # wait for all reports + if self.get_num_results() > 0: + self.wait_for_results() + + def report_scalar(self, title, series, value, iter): + """ + Report a scalar value + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param value: Reported value + :type value: float + :param iter: Iteration number + :type value: int + """ + ev = ScalarEvent(metric=self._normalize_name(title), + variant=self._normalize_name(series), value=value, iter=iter) + self._report(ev) + + def report_vector(self, title, series, values, iter): + """ + Report a vector of values + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param values: Reported values + :type value: [float] + :param iter: Iteration number + :type value: int + """ + if not isinstance(values, collections.Iterable): + raise ValueError('values: expected an iterable') + ev = VectorEvent(metric=self._normalize_name(title), + variant=self._normalize_name(series), values=values, iter=iter) + self._report(ev) + + def report_plot(self, title, series, plot, iter): + """ + Report a Plotly chart + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param plot: A JSON describing a plotly chart (see https://help.plot.ly/json-chart-schema/) + :type plot: str or dict + :param iter: Iteration number + :type value: int + """ + if isinstance(plot, dict): + plot = json.dumps(plot) + elif not isinstance(plot, six.string_types): + raise ValueError('Plot should be a string or a dict') + ev = PlotEvent(metric=self._normalize_name(title), + variant=self._normalize_name(series), plot_str=plot, iter=iter) + self._report(ev) + + def report_image(self, title, series, src, iter): + """ + Report an image. + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param src: Image source URI. This URI will be used by the webapp and workers when trying to obtain the image + for presentation of processing. Currently only http(s), file and s3 schemes are supported. 
+ :type src: str + :param iter: Iteration number + :type value: int + """ + ev = ImageEventNoUpload(metric=self._normalize_name(title), + variant=self._normalize_name(series), iter=iter, src=src) + self._report(ev) + + def report_image_and_upload(self, title, series, iter, path=None, matrix=None, upload_uri=None, + max_image_history=None): + """ + Report an image and upload its contents. Image is uploaded to a preconfigured bucket (see setup_upload()) with + a key (filename) describing the task ID, title, series and iteration. + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param iter: Iteration number + :type value: int + :param path: A path to an image file. Required unless matrix is provided. + :type path: str + :param matrix: A 3D numpy.ndarray object containing image data (BGR). Required unless filename is provided. + :type matrix: str + :param max_image_history: maximum number of image to store per metric/variant combination + use negative value for unlimited. default is set in global configuration (default=5) + """ + if not self._storage_uri and not upload_uri: + raise ValueError('Upload configuration is required (use setup_upload())') + if len([x for x in (path, matrix) if x is not None]) != 1: + raise ValueError('Expected only one of [filename, matrix]') + kwargs = dict(metric=self._normalize_name(title), + variant=self._normalize_name(series), iter=iter, image_file_history_size=max_image_history) + if matrix is None: + matrix = cv2.imread(path) + ev = ImageEvent(image_data=matrix, upload_uri=upload_uri, **kwargs) + self._report(ev) + + def report_histogram(self, title, series, histogram, iter, labels=None, xlabels=None, comment=None): + """ + Report an histogram bar plot + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param histogram: The histogram data. + A row for each dataset(bar in a bar group). A column for each bucket. + :type histogram: numpy array + :param iter: Iteration number + :type value: int + :param labels: The labels for each bar group. + :type labels: list of strings. + :param xlabels: The labels of the x axis. + :type xlabels: List of strings. + :param comment: comment underneath the title + :type comment: str + """ + plotly_dict = create_2d_histogram_plot( + np_row_wise=histogram, + title=title, + labels=labels, + series=series, + xlabels=xlabels, + comment=comment, + ) + + return self.report_plot( + title=self._normalize_name(title), + series=self._normalize_name(series), + plot=plotly_dict, + iter=iter, + ) + + def report_line_plot(self, title, series, iter, xtitle, ytitle, mode='lines', reverse_xaxis=False, comment=None): + """ + Report a (possibly multiple) line plot. + + :param title: Title (AKA metric) + :type title: str + :param series: All the series' data, one for each line in the plot. + :type series: An iterable of LineSeriesInfo. 
+ :param iter: Iteration number + :type iter: int + :param xtitle: x-axis title + :type xtitle: str + :param ytitle: y-axis title + :type ytitle: str + :param mode: 'lines' / 'markers' / 'lines+markers' + :type mode: str + :param reverse_xaxis: If true X axis will be displayed from high to low (reversed) + :type reverse_xaxis: bool + :param comment: comment underneath the title + :type comment: str + """ + + plotly_dict = create_line_plot( + title=title, + series=series, + xtitle=xtitle, + ytitle=ytitle, + mode=mode, + reverse_xaxis=reverse_xaxis, + comment=comment, + ) + + return self.report_plot( + title=self._normalize_name(title), + series='', + plot=plotly_dict, + iter=iter, + ) + + def report_2d_scatter(self, title, series, data, iter, mode='lines', xtitle=None, ytitle=None, labels=None, + comment=None): + """ + Report a 2d scatter graph (with lines) + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param data: A scattered data: pairs of x,y as rows in a numpy array + :type scatter: ndarray + :param iter: Iteration number + :type iter: int + :param mode: (type str) 'lines'/'markers'/'lines+markers' + :param xtitle: optional x-axis title + :param ytitle: optional y-axis title + :param labels: label (text) per point in the scatter (in the same order) + :param comment: comment underneath the title + :type comment: str + """ + plotly_dict = create_2d_scatter_series( + np_row_wise=data, + title=title, + series_name=series, + mode=mode, + xtitle=xtitle, + ytitle=ytitle, + labels=labels, + comment=comment, + ) + + return self.report_plot( + title=self._normalize_name(title), + series=self._normalize_name(series), + plot=plotly_dict, + iter=iter, + ) + + def report_3d_scatter(self, title, series, data, iter, labels=None, mode='lines', color=((217, 217, 217, 0.14),), + marker_size=5, line_width=0.8, xtitle=None, ytitle=None, ztitle=None, fill=None, + comment=None): + """ + Report a 3d scatter graph (with markers) + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param data: A scattered data: pairs of x,y,z as rows in a numpy array. or list of numpy arrays + :type data: ndarray. 
+ :param iter: Iteration number + :type iter: int + :param labels: label (text) per point in the scatter (in the same order) + :type labels: str + :param mode: (type str) 'lines'/'markers'/'lines+markers' + :param color: list of RGBA colors [(217, 217, 217, 0.14),] + :param marker_size: marker size in px + :param line_width: line width in px + :param xtitle: optional x-axis title + :param ytitle: optional y-axis title + :param ztitle: optional z-axis title + :param comment: comment underneath the title + """ + data_series = data if isinstance(data, list) else [data] + + def get_labels(i): + if labels and isinstance(labels, list): + try: + item = labels[i] + except IndexError: + item = labels[-1] + if isinstance(item, list): + return item + return labels + + plotly_obj = plotly_scatter3d_layout_dict( + title=title, + xaxis_title=xtitle, + yaxis_title=ytitle, + zaxis_title=ztitle, + comment=comment, + ) + + for i, values in enumerate(data_series): + plotly_obj = create_3d_scatter_series( + np_row_wise=values, + title=title, + series_name=series[i] if isinstance(series, list) else None, + labels=get_labels(i), + plotly_obj=plotly_obj, + mode=mode, + line_width=line_width, + marker_size=marker_size, + color=color, + fill_axis=fill, + ) + + return self.report_plot( + title=self._normalize_name(title), + series=self._normalize_name(series) if not isinstance(series, list) else None, + plot=plotly_obj, + iter=iter, + ) + + def report_value_matrix(self, title, series, data, iter, xlabels=None, ylabels=None, comment=None): + """ + Report a heat-map matrix + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param data: A heat-map matrix (example: confusion matrix) + :type data: ndarray + :param iter: Iteration number + :type iter: int + :param xlabels: optional label per column of the matrix + :param ylabels: optional label per row of the matrix + :param comment: comment underneath the title + """ + + plotly_dict = create_value_matrix( + np_value_matrix=data, + title=title, + xlabels=xlabels, + ylabels=ylabels, + series=series, + comment=comment, + ) + + return self.report_plot( + title=self._normalize_name(title), + series=self._normalize_name(series), + plot=plotly_dict, + iter=iter, + ) + + def report_value_surface(self, title, series, data, iter, xlabels=None, ylabels=None, + xtitle=None, ytitle=None, ztitle=None, camera=None, comment=None): + """ + Report a 3d surface (same data as heat-map matrix, only presented differently) + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param data: A heat-map matrix (example: confusion matrix) + :type data: ndarray + :param iter: Iteration number + :type iter: int + :param xlabels: optional label per column of the matrix + :param ylabels: optional label per row of the matrix + :param xtitle: optional x-axis title + :param ytitle: optional y-axis title + :param ztitle: optional z-axis title + :param camera: X,Y,Z camera position. 
def: (1,1,1) + :param comment: comment underneath the title + """ + + plotly_dict = create_3d_surface( + np_value_matrix=data, + title=title + '/' + series, + xlabels=xlabels, + ylabels=ylabels, + series=series, + xtitle=xtitle, + ytitle=ytitle, + ztitle=ztitle, + camera=camera, + comment=comment, + ) + + return self.report_plot( + title=self._normalize_name(title), + series=self._normalize_name(series), + plot=plotly_dict, + iter=iter, + ) + + @classmethod + def _normalize_name(cls, name): + if not name: + return name + return name.replace('$', '/').replace('.', '/') + + def __exit__(self, exc_type, exc_val, exc_tb): + # don't flush in case an exception was raised + if not exc_type: + self.flush() diff --git a/trains/backend_interface/model.py b/trains/backend_interface/model.py new file mode 100644 index 00000000..c6ffb37e --- /dev/null +++ b/trains/backend_interface/model.py @@ -0,0 +1,408 @@ +from collections import namedtuple +from functools import partial + +import six +from pathlib2 import Path + +from ..backend_api.services import models +from .base import IdObjectBase +from .util import make_message +from ..storage import StorageHelper +from ..utilities.async_manager import AsyncManagerMixin + +ModelPackage = namedtuple('ModelPackage', 'weights design') + + +class ModelDoesNotExistError(Exception): + pass + + +class _StorageUriMixin(object): + @property + def upload_storage_uri(self): + """ A URI into which models are uploaded """ + return self._upload_storage_uri + + @upload_storage_uri.setter + def upload_storage_uri(self, value): + self._upload_storage_uri = value.rstrip('/') if value else None + + +class DummyModel(models.Model, _StorageUriMixin): + def __init__(self, upload_storage_uri=None, *args, **kwargs): + super(DummyModel, self).__init__(*args, **kwargs) + self.upload_storage_uri = upload_storage_uri + + def update(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + + +class Model(IdObjectBase, AsyncManagerMixin, _StorageUriMixin): + """ Manager for backend model objects """ + + _EMPTY_MODEL_ID = 'empty' + + @property + def model_id(self): + return self.id + + @property + def storage(self): + return StorageHelper.get(self.upload_storage_uri) + + def __init__(self, upload_storage_uri, cache_dir, model_id=None, + upload_storage_suffix='models', session=None, log=None): + super(Model, self).__init__(id=model_id, session=session, log=log) + self._upload_storage_suffix = upload_storage_suffix + if model_id == self._EMPTY_MODEL_ID: + # Set an empty data object + self._data = models.Model() + else: + self._data = None + self._cache_dir = cache_dir + self.upload_storage_uri = upload_storage_uri + + def publish(self): + self.send(models.SetReadyRequest(model=self.id, publish_task=False)) + self.reload() + + def _reload(self): + """ Reload the model object """ + if self.id == self._EMPTY_MODEL_ID: + return + res = self.send(models.GetByIdRequest(model=self.id)) + return res.response.model + + def _upload_model(self, model_file, async_enable=False, target_filename=None, cb=None): + if not self.upload_storage_uri: + raise ValueError('Model has no storage URI defined (nowhere to upload to)') + helper = self.storage + target_filename = target_filename or Path(model_file).name + dest_path = '/'.join((self.upload_storage_uri, self._upload_storage_suffix or '.', target_filename)) + result = helper.upload( + src_path=model_file, + dest_path=dest_path, + async_enable=async_enable, + cb=partial(self._upload_callback, cb=cb), + ) + if async_enable: + def msg(num_results): 
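+ # wait callback passed to _add_async_result below: logs how many model uploads are still pending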
+ self.log.info("Waiting for previous model to upload (%d pending, %s)" % (num_results, dest_path)) + + self._add_async_result(result, wait_on_max_results=2, wait_cb=msg) + return dest_path + + def _upload_callback(self, res, cb=None): + if res is None: + self.log.debug('Starting model upload') + elif res is False: + self.log.info('Failed model upload') + else: + self.log.info('Completed model upload to %s' % res) + if cb: + cb(res) + + @staticmethod + def _wrap_design(design): + """ + Wrap design text with a dictionary. + + In the backend, the design is a dictionary with a 'design' key in it. + For the client, it is a text. This function wraps a design string with + the proper dictionary. + + :param design: If it is a dictionary, it mast have a 'design' key in it. + In that case, return design as-is. + If it is a string, return the dictionary {'design': design}. + If it is None (or any False value), return the dictionary {'design': ''} + + :return: A proper design dictionary according to design parameter. + """ + if isinstance(design, dict): + if 'design' not in design: + raise ValueError('design dictionary must have \'design\' key in it') + + return design + + return {'design': design if design else ''} + + @staticmethod + def _unwrap_design(design): + """ + Unwrap design text from a dictionary. + + In the backend, the design is a dictionary with a 'design' key in it. + For the client, it is a text. This function unwraps a design string from + the dictionary. + + :param design: If it is a dictionary with a 'design' key in it, return + design['design']. + If it is a dictionary without 'design' key, return the first value + in it's values list. + If it is an empty dictionary, None, or any other False value, + return an empty string. + If it is a string, return design as-is. + + :return: The design string according to design parameter. 
+ """ + if not design: + return '' + + if isinstance(design, six.string_types): + return design + + if isinstance(design, dict): + if 'design' in design: + return design['design'] + + return list(design.values())[0] + + raise ValueError('design must be a string or a dictionary with at least one value') + + def update(self, model_file=None, design=None, labels=None, name=None, comment=None, tags=None, + task_id=None, project_id=None, parent_id=None, uri=None, framework=None, + upload_storage_uri=None, target_filename=None, iteration=None): + """ Update model weights file and various model properties """ + + if self.id is None: + if upload_storage_uri: + self.upload_storage_uri = upload_storage_uri + self._create_empty_model(self.upload_storage_uri) + + # upload model file if needed and get uri + uri = uri or (self._upload_model(model_file, target_filename=target_filename) if model_file else self.data.uri) + # update fields + design = self._wrap_design(design) if design else self.data.design + name = name or self.data.name + comment = comment or self.data.comment + tags = tags or self.data.tags + labels = labels or self.data.labels + task = task_id or self.data.task + project = project_id or self.data.project + parent = parent_id or self.data.parent + + self.send(models.EditRequest( + model=self.id, + uri=uri, + name=name, + comment=comment, + tags=tags, + labels=labels, + design=design, + task=task, + project=project, + parent=parent, + framework=framework or self.data.framework, + iteration=iteration, + )) + self.reload() + + def update_and_upload(self, model_file, design=None, labels=None, name=None, comment=None, + tags=None, task_id=None, project_id=None, parent_id=None, framework=None, async_enable=False, + target_filename=None, cb=None, iteration=None): + """ Update the given model for a given task ID """ + if async_enable: + def callback(uploaded_uri): + if uploaded_uri is None: + return + + # If not successful, mark model as failed_uploading + if uploaded_uri is False: + uploaded_uri = '{}/failed_uploading'.format(self._upload_storage_uri) + + self.update( + uri=uploaded_uri, + task_id=task_id, + name=name, + comment=comment, + tags=tags, + design=design, + labels=labels, + project_id=project_id, + parent_id=parent_id, + framework=framework, + iteration=iteration, + ) + + if cb: + cb(model_file) + + uri = self._upload_model(model_file, async_enable=async_enable, target_filename=target_filename, cb=callback) + return uri + else: + uri = self._upload_model(model_file, async_enable=async_enable, target_filename=target_filename) + self.update( + uri=uri, + task_id=task_id, + name=name, + comment=comment, + tags=tags, + design=design, + labels=labels, + project_id=project_id, + parent_id=parent_id, + framework=framework, + ) + + return uri + + def _complete_update_for_task(self, uri, task_id=None, name=None, comment=None, tags=None, override_model_id=None, + cb=None): + if self._data: + name = name or self.data.name + comment = comment or self.data.comment + tags = tags or self.data.tags + uri = (uri or self.data.uri) if not override_model_id else None + + res = self.send( + models.UpdateForTaskRequest(task=task_id, uri=uri, name=name, comment=comment, tags=tags, + override_model_id=override_model_id)) + if self.id is None: + # update the model id. 
in case it was just created, this will trigger a reload of the model object + self.id = res.response.id + else: + self.reload() + try: + if cb: + cb(uri) + except Exception as ex: + self.log.warning('Failed calling callback on complete_update_for_task: %s' % str(ex)) + pass + + def update_for_task_and_upload( + self, model_file, task_id, name=None, comment=None, tags=None, override_model_id=None, target_filename=None, + async_enable=False, cb=None, iteration=None): + """ Update the given model for a given task ID """ + if async_enable: + callback = partial( + self._complete_update_for_task, task_id=task_id, name=name, comment=comment, tags=tags, + override_model_id=override_model_id, cb=cb) + uri = self._upload_model(model_file, target_filename=target_filename, async_enable=async_enable, cb=callback) + return uri + else: + uri = self._upload_model(model_file, target_filename=target_filename, async_enable=async_enable) + self._complete_update_for_task(uri, task_id, name, comment, tags, override_model_id) + _ = self.send(models.UpdateForTaskRequest(task=task_id, uri=uri, name=name, comment=comment, tags=tags, + override_model_id=override_model_id, iteration=iteration)) + return uri + + def update_for_task(self, task_id, uri=None, name=None, comment=None, tags=None, override_model_id=None): + self._complete_update_for_task(uri, task_id, name, comment, tags, override_model_id) + + @property + def model_design(self): + """ Get the model design. For now, this is stored as a single key in the design dict. """ + try: + return self._unwrap_design(self.data.design) + except ValueError: + # no design is yet specified + return None + + @property + def labels(self): + try: + return self.data.labels + except ValueError: + # no labels is yet specified + return None + + @property + def name(self): + try: + return self.data.name + except ValueError: + # no name is yet specified + return None + + @property + def comment(self): + try: + return self.data.comment + except ValueError: + # no comment is yet specified + return None + + @property + def tags(self): + return self.data.tags + + @property + def locked(self): + if self.id is None: + return False + return bool(self.data.ready) + + def download_model_weights(self): + """ Download the model weights into a local file in our cache """ + uri = self.data.uri + helper = StorageHelper.get(uri, logger=self._log, verbose=True) + return helper.download_to_file(uri, force_cache=True) + + @property + def cache_dir(self): + return self._cache_dir + + def save_model_design_file(self): + """ Download model description file into a local file in our cache_dir """ + design = self.model_design + filename = self.data.name + '.txt' + p = Path(self.cache_dir) / filename + # we always write the original model design to file, to prevent any mishaps + # if p.is_file(): + # return str(p) + p.parent.mkdir(parents=True, exist_ok=True) + p.write_text(six.text_type(design)) + return str(p) + + def get_model_package(self): + """ Get a named tuple containing the model's weights and design """ + return ModelPackage(weights=self.download_model_weights(), design=self.save_model_design_file()) + + def get_model_design(self): + """ Get model description (text) """ + return self.model_design + + @classmethod + def get_all(cls, session, log=None, **kwargs): + req = models.GetAllRequest(**kwargs) + res = cls._send(session=session, req=req, log=log) + return res + + def clone(self, name, comment=None, child=True, tags=None, task=None, ready=True): + """ + Clone this model into a new model. 
+ :param name: Name for the new model + :param comment: Optional comment for the new model + :param child: Should the new model be a child of this model? (default True) + :return: The new model's ID + """ + data = self.data + assert isinstance(data, models.Model) + parent = self.id if child else None + req = models.CreateRequest( + uri=data.uri, + name=name, + labels=data.labels, + comment=comment or data.comment, + tags=tags or data.tags, + framework=data.framework, + design=data.design, + ready=ready, + project=data.project, + parent=parent, + task=task, + ) + res = self.send(req) + return res.response.id + + def _create_empty_model(self, upload_storage_uri=None): + upload_storage_uri = upload_storage_uri or self.upload_storage_uri + name = make_message('Anonymous model %(time)s') + uri = '{}/uploading_file'.format(upload_storage_uri or 'file://') + req = models.CreateRequest(uri=uri, name=name, labels={}) + res = self.send(req) + if not res: + return False + self.id = res.response.id + return True diff --git a/trains/backend_interface/session.py b/trains/backend_interface/session.py new file mode 100644 index 00000000..30206a05 --- /dev/null +++ b/trains/backend_interface/session.py @@ -0,0 +1,28 @@ +from abc import ABCMeta, abstractmethod + +import six + + +class SendError(Exception): + """ A session send() error class """ + @property + def result(self): + return self._result + + def __init__(self, result, *args, **kwargs): + super(SendError, self).__init__(*args, **kwargs) + self._result = result + + +@six.add_metaclass(ABCMeta) +class SessionInterface(object): + """ Session wrapper interface providing a session property and a send convenience method """ + + @property + @abstractmethod + def session(self): + pass + + @abstractmethod + def send(self, req, ignore_errors=False, raise_on_errors=True, async_enable=False): + pass diff --git a/trains/backend_interface/setupuploadmixin.py b/trains/backend_interface/setupuploadmixin.py new file mode 100644 index 00000000..d21f79ed --- /dev/null +++ b/trains/backend_interface/setupuploadmixin.py @@ -0,0 +1,43 @@ +from abc import abstractproperty + +from ..backend_config.bucket_config import S3BucketConfig +from ..storage import StorageHelper + + +class SetupUploadMixin(object): + log = abstractproperty() + storage_uri = abstractproperty() + + def setup_upload( + self, bucket_name, host=None, access_key=None, secret_key=None, region=None, multipart=True, https=True): + """ + Set up upload options (currently only S3 is supported) + :param bucket_name: AWS bucket name + :type bucket_name: str + :param host: Hostname. Only required in case a non-AWS S3 solution (such as a local Minio server) is used + :type host: str + :param access_key: AWS access key. If not provided, we'll attempt to obtain the key from the + configuration file (bucket-specific, then global) + :type access_key: str + :param secret_key: AWS secret key. If not provided, we'll attempt to obtain the secret from the + configuration file (bucket-specific, then global) + :type secret_key: str + :param multipart: Server supports multipart. Only required when using a non-AWS S3 solution that doesn't support + multipart. + :type multipart: bool + :param https: Server supports HTTPS. Only required when using a non-AWS S3 solution that only supports HTTPS. + :type https: bool + :param region: Bucket region.
Required if the bucket doesn't reside in the default region (us-east-1) + :type region: str + """ + self._bucket_config = S3BucketConfig( + bucket=bucket_name, + host=host, + key=access_key, + secret=secret_key, + multipart=multipart, + secure=https, + region=region + ) + self.storage_uri = ('s3://%(host)s/%(bucket_name)s' if host else 's3://%(bucket_name)s') % locals() + StorageHelper.add_configuration(self._bucket_config, log=self.log) diff --git a/trains/backend_interface/task/__init__.py b/trains/backend_interface/task/__init__.py new file mode 100644 index 00000000..c4ee113a --- /dev/null +++ b/trains/backend_interface/task/__init__.py @@ -0,0 +1 @@ +from .task import Task, TaskEntry, TaskStatusEnum diff --git a/trains/backend_interface/task/access.py b/trains/backend_interface/task/access.py new file mode 100644 index 00000000..9dc4040e --- /dev/null +++ b/trains/backend_interface/task/access.py @@ -0,0 +1,85 @@ +import itertools +import operator + +from abc import abstractproperty + +import six +from pathlib2 import Path + + +class AccessMixin(object): + """ A mixin providing task fields access functionality """ + session = abstractproperty() + data = abstractproperty() + cache_dir = abstractproperty() + log = abstractproperty() + + def _get_task_property(self, prop_path, raise_on_error=True, log_on_error=True, default=None): + obj = self.data + props = prop_path.split('.') + for i in range(len(props)): + obj = getattr(obj, props[i], None) + if obj is None: + msg = 'Task has no %s section defined' % '.'.join(props[:i + 1]) + if log_on_error: + self.log.info(msg) + if raise_on_error: + raise ValueError(msg) + return default + return obj + + def _set_task_property(self, prop_path, value, raise_on_error=True, log_on_error=True): + props = prop_path.split('.') + if len(props) > 1: + obj = self._get_task_property('.'.join(props[:-1]), raise_on_error=raise_on_error, + log_on_error=log_on_error) + else: + obj = self.data + setattr(obj, props[-1], value) + + def save_exec_model_design_file(self, filename='model_design.txt', use_cache=False): + """ Save execution model design to file """ + p = Path(self.cache_dir) / filename + if use_cache and p.is_file(): + return str(p) + desc = self._get_task_property('execution.model_desc') + try: + design = six.next(six.itervalues(desc)) + except StopIteration: + design = None + if not design: + raise ValueError('Task has no design in execution.model_desc') + p.parent.mkdir(parents=True, exist_ok=True) + p.write_text('%s' % design) + return str(p) + + def get_parameters(self): + return self._get_task_property('execution.parameters') + + def get_label_num_description(self): + """ Get a dict of label number to a string representing all labels associated with this number on the + model labels + """ + model_labels = self._get_task_property('execution.model_labels') + label_getter = operator.itemgetter(0) + num_getter = operator.itemgetter(1) + groups = list(itertools.groupby(sorted(model_labels.items(), key=num_getter), key=num_getter)) + if any(len(set(label_getter(x) for x in group)) > 1 for _, group in groups): + raise ValueError("Multiple labels mapped to same model index not supported") + return {key: ','.join(label_getter(x) for x in group) for key, group in groups} + + def get_output_destination(self, extra_path=None, **kwargs): + """ Get the task's output destination, with an optional suffix """ + return self._get_task_property('output.destination', **kwargs) + + def get_num_of_classes(self): + """ number of classes based on the task's labels """ + 
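# model_labels maps each label name to an integer index, with 0 reserved for the background class;
+ # count the non-background labels, warn if the highest index disagrees with that count, then add 1 for background +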
model_labels = self.data.execution.model_labels + expected_num_of_classes = 0 + for labels, index in model_labels.items(): + expected_num_of_classes += 1 if int(index) > 0 else 0 + num_of_classes = int(max(model_labels.values())) + if num_of_classes != expected_num_of_classes: + self.log.warn('The highest label index is %d, while there are %d non-bg labels' % + (num_of_classes, expected_num_of_classes)) + return num_of_classes + 1 # +1 is meant for bg! diff --git a/trains/backend_interface/task/args.py b/trains/backend_interface/task/args.py new file mode 100644 index 00000000..63f639b1 --- /dev/null +++ b/trains/backend_interface/task/args.py @@ -0,0 +1,314 @@ +import yaml + +from six import PY2 +from argparse import _StoreAction, ArgumentError, _StoreConstAction, _SubParsersAction, SUPPRESS +from copy import copy + +from ...utilities.args import call_original_argparser + + +class _Arguments(object): + _prefix_sep = '/' + # TODO: separate dict and argparse after we add UI support + _prefix_dict = 'dict' + _prefix_sep + _prefix_args = 'argparse' + _prefix_sep + _prefix_tf_defines = 'TF_DEFINE' + _prefix_sep + + class _ProxyDictWrite(dict): + """ Dictionary wrapper that updates an arguments instance on any item set in the dictionary """ + def __init__(self, arguments, *args, **kwargs): + super(_Arguments._ProxyDictWrite, self).__init__(*args, **kwargs) + self._arguments = arguments + + def __setitem__(self, key, value): + super(_Arguments._ProxyDictWrite, self).__setitem__(key, value) + if self._arguments: + self._arguments.copy_from_dict(self) + + class _ProxyDictReadOnly(dict): + """ Dictionary wrapper that prevents modifications to the dictionary """ + def __init__(self, *args, **kwargs): + super(_Arguments._ProxyDictReadOnly, self).__init__(*args, **kwargs) + + def __setitem__(self, key, value): + pass + + def __init__(self, task): + super(_Arguments, self).__init__() + self._task = task + + def set_defaults(self, *dicts, **kwargs): + self._task.set_parameters(*dicts, **kwargs) + + def add_argument(self, option_strings, type=None, default=None, help=None): + if not option_strings: + raise Exception('Expected at least one argument name (option string)') + name = option_strings[0].strip('- \t') if isinstance(option_strings, list) else option_strings.strip('- \t') + # TODO: add argparse prefix + # name = self._prefix_args + name + self._task.set_parameter(name=name, value=default, description=help) + + def connect(self, parser): + self._task.connect_argparse(parser) + + @classmethod + def _add_to_defaults(cls, a_parser, defaults, a_args=None, a_namespace=None, a_parsed_args=None): + actions = [ + a for a in a_parser._actions + if isinstance(a, _StoreAction) or isinstance(a, _StoreConstAction) + ] + args_dict = {} + try: + if isinstance(a_parsed_args, dict): + args_dict = a_parsed_args + else: + if a_parsed_args: + args_dict = a_parsed_args.__dict__ + else: + args_dict = call_original_argparser(a_parser, args=a_args, namespace=a_namespace).__dict__ + defaults_ = { + a.dest: args_dict.get(a.dest) if (args_dict.get(a.dest) is not None) else '' + for a in actions + } + except Exception: + # don't crash us if we failed parsing the inputs + defaults_ = { + a.dest: a.default if a.default is not None else '' + for a in actions + } + + full_args_dict = copy(defaults) + full_args_dict.update(args_dict) + defaults.update(defaults_) + + # deal with sub parsers + sub_parsers = [ + a for a in a_parser._actions + if isinstance(a, _SubParsersAction) + ] + for sub_parser in sub_parsers: + if 
sub_parser.dest and sub_parser.dest != SUPPRESS: + defaults[sub_parser.dest] = full_args_dict.get(sub_parser.dest) + for choice in sub_parser.choices.values(): + # recursively parse + defaults = cls._add_to_defaults( + a_parser=choice, + defaults=defaults, + a_parsed_args=a_parsed_args or full_args_dict + ) + + return defaults + + def copy_defaults_from_argparse(self, parser, args=None, namespace=None, parsed_args=None): + task_defaults = {} + self._add_to_defaults(parser, task_defaults, args, namespace, parsed_args) + + # Make sure we didn't miss anything + if parsed_args: + for k, v in parsed_args.__dict__.items(): + if k not in task_defaults: + if type(v) == None: + task_defaults[k] = '' + elif type(v) in (str, int, float, bool, list): + task_defaults[k] = v + + # Verify arguments + for k, v in task_defaults.items(): + try: + if type(v) is list: + task_defaults[k] = '[' + ', '.join(map("{0}".format, v)) + ']' + elif type(v) not in (str, int, float, bool): + task_defaults[k] = str(v) + except Exception: + del task_defaults[k] + # Add prefix, TODO: add argparse prefix + # task_defaults = dict([(self._prefix_args + k, v) for k, v in task_defaults.items()]) + task_defaults = dict([(k, v) for k, v in task_defaults.items()]) + # Store to task + self._task.update_parameters(task_defaults) + + @classmethod + def _find_parser_action(cls, a_parser, name): + # find by name + _actions = [(a_parser, a) for a in a_parser._actions if a.dest == name] + if _actions: + return _actions + # iterate over subparsers + _actions = [] + sub_parsers = [a for a in a_parser._actions if isinstance(a, _SubParsersAction)] + for sub_parser in sub_parsers: + for choice in sub_parser.choices.values(): + # recursively parse + _action = cls._find_parser_action(choice, name) + if _action: + _actions.extend(_action) + return _actions + + def copy_to_parser(self, parser, parsed_args): + # todo: change to argparse prefix only + # task_arguments = dict([(k[len(self._prefix_args):], v) for k, v in self._task.get_parameters().items() + # if k.startswith(self._prefix_args)]) + task_arguments = dict([(k, v) for k, v in self._task.get_parameters().items() + if not k.startswith(self._prefix_tf_defines)]) + for k, v in task_arguments.items(): + # if we have a StoreTrueAction and the value is either False or Empty or 0 change the default to False + # with the rest we have to make sure the type is correct + matched_actions = self._find_parser_action(parser, k) + for parent_parser, current_action in matched_actions: + if current_action and isinstance(current_action, _StoreConstAction): + # make the default value boolean + # first check if False value + const_value = current_action.const if current_action.const is not None else ( + current_action.default if current_action.default is not None else True) + const_type = type(const_value) + strip_v = str(v).lower().strip() + if const_type == bool: + if strip_v == 'false' or not strip_v: + const_value = False + elif strip_v == 'true': + const_value = True + else: + # first try to cast to integer + try: + const_value = int(strip_v) + except ValueError: + pass + else: + const_value = strip_v + # then cast to const type (might be boolean) + try: + const_value = const_type(const_value) + current_action.const = const_value + except ValueError: + pass + task_arguments[k] = const_value + elif current_action and current_action.nargs == '+': + try: + v = yaml.load(v.strip()) + if current_action.type: + v = [current_action.type(a) for a in v] + elif current_action.default: + v_type = 
type(current_action.default[0]) + v = [v_type(a) for a in v] + task_arguments[k] = v + except Exception: + pass + elif current_action and not current_action.type: + # cast manually if there is no type + var_type = type(current_action.default) + # if we have an int, we should cast to float, because it is more generic + if var_type == int: + var_type = float + elif var_type == type(None): + var_type = str + # now we should try and cast the value if we can + try: + v = var_type(v) + task_arguments[k] = v + except Exception: + pass + # add as default + try: + if current_action and isinstance(current_action, _SubParsersAction): + current_action.default = v + current_action.required = False + elif current_action and isinstance(current_action, _StoreAction): + current_action.default = v + current_action.required = False + # python2 doesn't support defaults for positional arguments, unless used with nargs=? + if PY2 and not current_action.nargs: + current_action.nargs = '?' + else: + parent_parser.add_argument( + '--%s' % k, + default=v, + type=type(v), + required=False, + help='Task parameter %s (default %s)' % (k, v), + ) + except ArgumentError: + pass + except Exception: + pass + # if we already have an instance of parsed args, we should update its values + if parsed_args: + for k, v in task_arguments.items(): + setattr(parsed_args, k, v) + parser.set_defaults(**task_arguments) + + def copy_from_dict(self, dictionary, prefix=None): + # TODO: add dict prefix + prefix = prefix or '' # self._prefix_dict + if prefix: + prefix_dictionary = dict([(prefix + k, v) for k, v in dictionary.items()]) + cur_params = dict([(k, v) for k, v in self._task.get_parameters().items() if not k.startswith(prefix)]) + cur_params.update(prefix_dictionary) + self._task.set_parameters(cur_params) + else: + self._task.update_parameters(dictionary) + if not isinstance(dictionary, self._ProxyDictWrite): + return self._ProxyDictWrite(self, **dictionary) + return dictionary + + def copy_to_dict(self, dictionary, prefix=None): + # iterate over keys and merge values according to parameter type in dictionary + # TODO: add dict prefix + prefix = prefix or '' # self._prefix_dict + if prefix: + parameters = dict([(k[len(prefix):], v) for k, v in self._task.get_parameters().items() + if k.startswith(prefix)]) + else: + parameters = dict([(k, v) for k, v in self._task.get_parameters().items() + if not k.startswith(self._prefix_tf_defines)]) + + for k, v in dictionary.items(): + param = parameters.get(k, None) + if param is None: + continue + v_type = type(v) + # assume more general purpose type int -> float + if v_type == int: + v_type = float + elif v_type == bool: + # cast based on string or int + try: + param = bool(float(param)) + except ValueError: + try: + param = str(param).lower().strip() == 'true' + except ValueError: + self._task.log.warning('Failed parsing task parameter %s=%s keeping default %s=%s' % + (str(k), str(param), str(k), str(v))) + continue + elif v_type == list: + try: + p = str(param).strip() + param = yaml.load(p) + except Exception: + self._task.log.warning('Failed parsing task parameter %s=%s keeping default %s=%s' % + (str(k), str(param), str(k), str(v))) + continue + elif v_type == dict: + try: + p = str(param).strip() + param = yaml.load(p) + except Exception: + self._task.log.warning('Failed parsing task parameter %s=%s keeping default %s=%s' % + (str(k), str(param), str(k), str(v))) + elif v_type == type(None): + v_type = str + + try: + dictionary[k] = v_type(param) + except ValueError: + 
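+                # Casting the stored parameter back to the caller's type failed; keep the
+                # caller's default for this key and only log a warning (see below).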
self._task.log.warning('Failed parsing task parameter %s=%s keeping default %s=%s' % + (str(k), str(param), str(k), str(v))) + continue + # add missing parameters to dictionary + for k, v in parameters.items(): + if k not in dictionary: + dictionary[k] = v + + if not isinstance(dictionary, self._ProxyDictReadOnly): + return self._ProxyDictReadOnly(**dictionary) + return dictionary diff --git a/trains/backend_interface/task/development/__init__.py b/trains/backend_interface/task/development/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/trains/backend_interface/task/development/stop_signal.py b/trains/backend_interface/task/development/stop_signal.py new file mode 100644 index 00000000..1d15f22f --- /dev/null +++ b/trains/backend_interface/task/development/stop_signal.py @@ -0,0 +1,48 @@ +from ....config import config +from ....backend_interface import Task, TaskStatusEnum + + +class TaskStopReason(object): + stopped = "stopped" + reset = "reset" + status_changed = "status_changed" + + +class TaskStopSignal(object): + enabled = bool(config.get('development.support_stopping', False)) + + _number_of_consecutive_reset_tests = 4 + + _unexpected_statuses = ( + TaskStatusEnum.closed, + TaskStatusEnum.stopped, + TaskStatusEnum.failed, + TaskStatusEnum.published, + ) + + def __init__(self, task): + assert isinstance(task, Task) + self.task = task + self._task_reset_state_counter = 0 + + def test(self): + status = self.task.status + message = self.task.data.status_message + + if status == TaskStatusEnum.in_progress and "stopping" in message: + return TaskStopReason.stopped + + if status in self._unexpected_statuses and "worker" not in message: + return TaskStopReason.status_changed + + if status == TaskStatusEnum.created: + self._task_reset_state_counter += 1 + + if self._task_reset_state_counter >= self._number_of_consecutive_reset_tests: + return TaskStopReason.reset + + self.task.get_logger().warning( + "Task {} was reset! if state is consistent we shall terminate.".format(self.task.id), + ) + else: + self._task_reset_state_counter = 0 diff --git a/trains/backend_interface/task/development/worker.py b/trains/backend_interface/task/development/worker.py new file mode 100644 index 00000000..306cd247 --- /dev/null +++ b/trains/backend_interface/task/development/worker.py @@ -0,0 +1,26 @@ +from socket import gethostname + +import attr + +from ....config import config, running_remotely, dev_worker_name + + +@attr.s +class DevWorker(object): + prefix = attr.ib(type=str, default="MANUAL:") + + report_period = float(config.get('development.worker.report_period_sec', 30.)) + report_stdout = bool(config.get('development.worker.log_stdout', True)) + + @classmethod + def is_enabled(cls, model_updated=False): + return False + + def status_report(self, timestamp=None): + return True + + def register(self): + return True + + def unregister(self): + return True diff --git a/trains/backend_interface/task/log.py b/trains/backend_interface/task/log.py new file mode 100644 index 00000000..4a91d8b2 --- /dev/null +++ b/trains/backend_interface/task/log.py @@ -0,0 +1,110 @@ +import time +from logging import LogRecord, getLogger, basicConfig +from logging.handlers import BufferingHandler + +from ...backend_api.services import events +from ...config import config + +buffer_capacity = config.get('log.task_log_buffer_capacity', 100) + + +class TaskHandler(BufferingHandler): + __flush_max_history_seconds = 30. 
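+    # Buffered records are flushed to the backend when the buffer reaches `capacity`
+    # (log.task_log_buffer_capacity, default 100) or when the oldest buffered record is
+    # older than __flush_max_history_seconds (see shouldFlush() below).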
+ __once = False + + @property + def task_id(self): + return self._task_id + + @task_id.setter + def task_id(self, value): + self._task_id = value + + def __init__(self, session, task_id, capacity=buffer_capacity): + super(TaskHandler, self).__init__(capacity) + self.task_id = task_id + self.session = session + self.last_timestamp = 0 + self.counter = 1 + self._last_event = None + + def shouldFlush(self, record): + """ + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. This method can be + overridden to implement custom flushing strategies. + """ + + # Notice! protect against infinite loops, i.e. flush while sending previous records + # if self.lock._is_owned(): + # return False + + # if we need to add handlers to the base_logger, + # it will not automatically create stream one when first used, so we must manually configure it. + if not TaskHandler.__once: + base_logger = getLogger() + if len(base_logger.handlers) == 1 and isinstance(base_logger.handlers[0], TaskHandler): + if record.name != 'console' and not record.name.startswith('trains.'): + base_logger.removeHandler(self) + basicConfig() + base_logger.addHandler(self) + TaskHandler.__once = True + else: + TaskHandler.__once = True + + # if we passed the max buffer + if len(self.buffer) >= self.capacity: + return True + + # if the first entry in the log was too long ago. + if len(self.buffer) and (time.time() - self.buffer[0].created) > self.__flush_max_history_seconds: + return True + + return False + + def _record_to_event(self, record): + # type: (LogRecord) -> events.TaskLogEvent + timestamp = int(record.created * 1000) + if timestamp == self.last_timestamp: + timestamp += self.counter + self.counter += 1 + else: + self.last_timestamp = timestamp + self.counter = 1 + + # unite all records in a single second + if self._last_event and timestamp - self._last_event.timestamp < 1000 and \ + record.levelname.lower() == str(self._last_event.level): + # ignore backspaces (they are often used) + self._last_event.msg += '\n' + record.getMessage().replace('\x08', '') + return None + + self._last_event = events.TaskLogEvent( + task=self.task_id, + timestamp=timestamp, + level=record.levelname.lower(), + worker=self.session.worker, + msg=record.getMessage().replace('\x08', '') # ignore backspaces (they are often used) + ) + return self._last_event + + def flush(self): + if not self.buffer: + return + self.acquire() + buffer = self.buffer + try: + if not buffer: + return + self.buffer = [] + record_events = [self._record_to_event(record) for record in buffer] + self._last_event = None + requests = [events.AddRequest(e) for e in record_events if e] + res = self.session.send(events.AddBatchRequest(requests=requests)) + if not res.ok(): + print("Failed logging task to backend ({:d} lines, {})".format(len(buffer), str(res.meta))) + except Exception: + print("Failed logging task to backend ({:d} lines)".format(len(buffer))) + finally: + self.release() diff --git a/trains/backend_interface/task/repo/__init__.py b/trains/backend_interface/task/repo/__init__.py new file mode 100644 index 00000000..40518b2c --- /dev/null +++ b/trains/backend_interface/task/repo/__init__.py @@ -0,0 +1,2 @@ +from .scriptinfo import ScriptInfo +from .freeze import pip_freeze diff --git a/trains/backend_interface/task/repo/detectors.py b/trains/backend_interface/task/repo/detectors.py new file mode 100644 index 00000000..370359a0 --- /dev/null +++ b/trains/backend_interface/task/repo/detectors.py @@ -0,0 +1,248 @@ +import abc +import os 
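+# Each Detector subclass declares the shell commands used to query url/branch/commit/root/status/diff,
+# runs them in the script's directory and returns a Result; the *EnvDetector variants read the same
+# fields from the VCS_* environment definitions instead of invoking a VCS client.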
+from subprocess import call, CalledProcessError + +import attr +import six +from pathlib2 import Path + +from ....config.defs import ( + VCS_REPO_TYPE, + VCS_DIFF, + VCS_STATUS, + VCS_ROOT, + VCS_BRANCH, + VCS_COMMIT_ID, + VCS_REPOSITORY_URL, +) +from ....debugging import get_logger +from .util import get_command_output + +_logger = get_logger("Repository Detection") + + +class DetectionError(Exception): + pass + + +@attr.s +class Result(object): + """" Repository information as queried by a detector """ + + url = attr.ib(default="") + branch = attr.ib(default="") + commit = attr.ib(default="") + root = attr.ib(default="") + status = attr.ib(default="") + diff = attr.ib(default="") + modified = attr.ib(default=False, type=bool, converter=bool) + + def is_empty(self): + return not any(attr.asdict(self).values()) + + +@six.add_metaclass(abc.ABCMeta) +class Detector(object): + """ Base class for repository detection """ + + """ + Commands are represented using the result class, where each attribute contains + the command used to obtain the value of the same attribute in the actual result. + """ + + @attr.s + class Commands(object): + """" Repository information as queried by a detector """ + + url = attr.ib(default=None, type=list) + branch = attr.ib(default=None, type=list) + commit = attr.ib(default=None, type=list) + root = attr.ib(default=None, type=list) + status = attr.ib(default=None, type=list) + diff = attr.ib(default=None, type=list) + modified = attr.ib(default=None, type=list) + + def __init__(self, type_name, name=None): + self.type_name = type_name + self.name = name or type_name + + def _get_commands(self): + """ Returns a RepoInfo instance containing a command for each info attribute """ + return self.Commands() + + def _get_command_output(self, path, name, command): + """ Run a command and return its output """ + try: + return get_command_output(command, path) + + except (CalledProcessError, UnicodeDecodeError) as ex: + _logger.warning( + "Can't get {} information for {} repo in {}: {}".format( + name, self.type_name, path, str(ex) + ) + ) + return "" + + def _get_info(self, path, include_diff=False): + """ + Get repository information. + :param path: Path to repository + :param include_diff: Whether to include the diff command's output (if available) + :return: RepoInfo instance + """ + path = str(path) + commands = self._get_commands() + if not include_diff: + commands.diff = None + + info = Result( + **{ + name: self._get_command_output(path, name, command) + for name, command in attr.asdict(commands).items() + if command + } + ) + + return info + + def _post_process_info(self, info): + # check if there are uncommitted changes in the current repository + return info + + def get_info(self, path, include_diff=False): + """ + Get repository information. 
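+        Commands that fail or are unavailable simply yield empty string values in the result
+        (see _get_command_output above).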
+ :param path: Path to repository + :param include_diff: Whether to include the diff command's output (if available) + :return: RepoInfo instance + """ + info = self._get_info(path, include_diff) + return self._post_process_info(info) + + def _is_repo_type(self, script_path): + try: + with open(os.devnull, "wb") as devnull: + return ( + call( + [self.type_name, "status"], + stderr=devnull, + stdout=devnull, + cwd=str(script_path), + ) + == 0 + ) + except CalledProcessError: + _logger.warning("Can't get {} status".format(self.type_name)) + except (OSError, EnvironmentError, IOError): + # File not found or can't be executed + pass + return False + + def exists(self, script_path): + """ + Test whether the given script resides in + a repository type represented by this plugin. + """ + return self._is_repo_type(script_path) + + +class HgDetector(Detector): + def __init__(self): + super(HgDetector, self).__init__("hg") + + def _get_commands(self): + return self.Commands( + url=["hg", "paths", "--verbose"], + branch=["hg", "--debug", "id", "-b"], + commit=["hg", "--debug", "id", "-i"], + root=["hg", "root"], + status=["hg", "status"], + diff=["hg", "diff"], + modified=["hg", "status", "-m"], + ) + + def _post_process_info(self, info): + if info.url: + info.url = info.url.split(" = ")[1] + + if info.commit: + info.commit = info.commit.rstrip("+") + + return info + + +class GitDetector(Detector): + def __init__(self): + super(GitDetector, self).__init__("git") + + def _get_commands(self): + return self.Commands( + url=["git", "remote", "get-url", "origin"], + branch=["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"], + commit=["git", "rev-parse", "HEAD"], + root=["git", "rev-parse", "--show-toplevel"], + status=["git", "status", "-s"], + diff=["git", "diff"], + modified=["git", "ls-files", "-m"], + ) + + def _post_process_info(self, info): + if info.url and not info.url.endswith(".git"): + info.url += ".git" + + if (info.branch or "").startswith("origin/"): + info.branch = info.branch[len("origin/") :] + + return info + + +class EnvDetector(Detector): + def __init__(self, type_name): + super(EnvDetector, self).__init__(type_name, "{} environment".format(type_name)) + + def _is_repo_type(self, script_path): + return VCS_REPO_TYPE.get(default="").lower() == self.type_name and bool( + VCS_REPOSITORY_URL.get() + ) + + @staticmethod + def _normalize_root(root): + """ + Get the absolute location of the parent folder (where .git resides) + """ + root_parts = list(reversed(Path(root).parts)) + cwd_abs = list(reversed(Path.cwd().parts)) + count = len(cwd_abs) + for i, p in enumerate(cwd_abs): + if i >= len(root_parts): + break + if p == root_parts[i]: + count -= 1 + cwd_abs.reverse() + root_abs_path = Path().joinpath(*cwd_abs[:count]) + return str(root_abs_path) + + def _get_info(self, _, include_diff=False): + repository_url = VCS_REPOSITORY_URL.get() + + if not repository_url: + raise DetectionError("No VCS environment data") + + return Result( + url=repository_url, + branch=VCS_BRANCH.get(), + commit=VCS_COMMIT_ID.get(), + root=VCS_ROOT.get(converter=self._normalize_root), + status=VCS_STATUS.get(), + diff=VCS_DIFF.get(), + ) + + +class GitEnvDetector(EnvDetector): + def __init__(self): + super(GitEnvDetector, self).__init__("git") + + +class HgEnvDetector(EnvDetector): + def __init__(self): + super(HgEnvDetector, self).__init__("hg") diff --git a/trains/backend_interface/task/repo/freeze.py b/trains/backend_interface/task/repo/freeze.py new file mode 100644 index 00000000..77123f40 --- 
/dev/null +++ b/trains/backend_interface/task/repo/freeze.py @@ -0,0 +1,11 @@ +import sys + +from .util import get_command_output + + +def pip_freeze(): + try: + return get_command_output([sys.executable, "-m", "pip", "freeze"]).splitlines() + except Exception as ex: + print('Failed calling "pip freeze": {}'.format(str(ex))) + return [] diff --git a/trains/backend_interface/task/repo/scriptinfo.py b/trains/backend_interface/task/repo/scriptinfo.py new file mode 100644 index 00000000..ce2a0672 --- /dev/null +++ b/trains/backend_interface/task/repo/scriptinfo.py @@ -0,0 +1,162 @@ +import os +import sys + +import attr +from furl import furl +from pathlib2 import Path + +from ....debugging import get_logger +from .detectors import GitEnvDetector, GitDetector, HgEnvDetector, HgDetector, Result as DetectionResult + +_logger = get_logger("Repository Detection") + + +class ScriptInfoError(Exception): + pass + + +class ScriptInfo(object): + + plugins = [GitEnvDetector(), HgEnvDetector(), HgDetector(), GitDetector()] + """ Script info detection plugins, in order of priority """ + + @classmethod + def _get_jupyter_notebook_filename(cls): + if not sys.argv[0].endswith('/ipykernel_launcher.py') or len(sys.argv) < 3 or not sys.argv[2].endswith('.json'): + return None + + # we can safely assume that we can import the notebook package here + try: + from notebook.notebookapp import list_running_servers + import requests + current_kernel = sys.argv[2].split('/')[-1].replace('kernel-', '').replace('.json', '') + server_info = next(list_running_servers()) + r = requests.get( + url=server_info['url'] + 'api/sessions', + headers={'Authorization': 'token {}'.format(server_info.get('token', '')), }) + r.raise_for_status() + notebooks = r.json() + + cur_notebook = None + for n in notebooks: + if n['kernel']['id'] == current_kernel: + cur_notebook = n + break + + notebook_path = cur_notebook['notebook']['path'] + entry_point_filename = notebook_path.split('/')[-1] + + # now we should try to find the actual file + entry_point = (Path.cwd() / entry_point_filename).absolute() + if not entry_point.is_file(): + entry_point = (Path.cwd() / notebook_path).absolute() + + # now replace the .ipynb with .py + # we assume we will have that file available with the Jupyter notebook plugin + entry_point = entry_point.with_suffix('.py') + + return entry_point.as_posix() + except Exception: + return None + + @classmethod + def _get_entry_point(cls, repo_root, script_path): + repo_root = Path(repo_root).absolute() + + try: + # Use os.path.relpath as it calculates up dir movements (../) + entry_point = os.path.relpath(str(script_path), str(Path.cwd())) + except ValueError: + # Working directory not under repository root + entry_point = script_path.relative_to(repo_root) + + return Path(entry_point).as_posix() + + @classmethod + def _get_working_dir(cls, repo_root): + repo_root = Path(repo_root).absolute() + + try: + return Path.cwd().relative_to(repo_root).as_posix() + except ValueError: + # Working directory not under repository root + return os.path.curdir + + @classmethod + def _get_script_info(cls, filepath, check_uncommitted=False, log=None): + jupyter_filepath = cls._get_jupyter_notebook_filename() + if jupyter_filepath: + script_path = Path(os.path.normpath(jupyter_filepath)).absolute() + else: + script_path = Path(os.path.normpath(filepath)).absolute() + if not script_path.is_file(): + raise ScriptInfoError( + "Script file [{}] could not be found".format(filepath) + ) + + script_dir = script_path.parent + + def _log(msg, 
*args, **kwargs): + if not log: + return + log.warning( + "Failed auto-detecting task repository: {}".format( + msg.format(*args, **kwargs) + ) + ) + + plugin = next((p for p in cls.plugins if p.exists(script_dir)), None) + repo_info = DetectionResult() + if not plugin: + _log("expected one of: {}", ", ".join((p.name for p in cls.plugins))) + else: + try: + repo_info = plugin.get_info(str(script_dir), include_diff=check_uncommitted) + except Exception as ex: + _log("no info for {} ({})", script_dir, ex) + else: + if repo_info.is_empty(): + _log("no info for {}", script_dir) + + repo_root = repo_info.root or script_dir + working_dir = cls._get_working_dir(repo_root) + entry_point = cls._get_entry_point(repo_root, script_path) + + script_info = dict( + repository=furl(repo_info.url).remove(username=True, password=True).tostr(), + branch=repo_info.branch, + version_num=repo_info.commit, + entry_point=entry_point, + working_dir=working_dir, + diff=repo_info.diff, + ) + + messages = [] + if repo_info.modified: + messages.append( + "======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======".format( + script_info.get("repository", "") + ) + ) + + if not any(script_info.values()): + script_info = None + + return ScriptInfoResult(script=script_info, warning_messages=messages) + + @classmethod + def get(cls, filepath=sys.argv[0], check_uncommitted=False, log=None): + try: + return cls._get_script_info( + filepath=filepath, check_uncommitted=check_uncommitted, log=log + ) + except Exception as ex: + if log: + log.warning("Failed auto-detecting task repository: {}".format(ex)) + return ScriptInfoResult() + + +@attr.s +class ScriptInfoResult(object): + script = attr.ib(default=None) + warning_messages = attr.ib(factory=list) diff --git a/trains/backend_interface/task/repo/util.py b/trains/backend_interface/task/repo/util.py new file mode 100644 index 00000000..36d0b4bf --- /dev/null +++ b/trains/backend_interface/task/repo/util.py @@ -0,0 +1,12 @@ +import os +from subprocess import check_output + + +def get_command_output(command, path=None): + """ + Run a command and return its output + :raises CalledProcessError: when command execution fails + :raises UnicodeDecodeError: when output decoding fails + """ + with open(os.devnull, "wb") as devnull: + return check_output(command, cwd=path, stderr=devnull).decode().strip() diff --git a/trains/backend_interface/task/task.py b/trains/backend_interface/task/task.py new file mode 100644 index 00000000..dfb7213f --- /dev/null +++ b/trains/backend_interface/task/task.py @@ -0,0 +1,811 @@ +""" Backend task management support """ +import collections +import itertools +import logging +from copy import copy +from six.moves.urllib.parse import urlparse, urlunparse + +import six + +from ...backend_interface.task.development.worker import DevWorker +from ...backend_api import Session +from ...backend_api.services import tasks, models, events, projects +from pathlib2 import Path +from pyhocon import ConfigTree, ConfigFactory + +from ..base import IdObjectBase +from ..metrics import Metrics, Reporter +from ..model import Model +from ..setupuploadmixin import SetupUploadMixin +from ..util import make_message, get_or_create_project, get_single_result, \ + exact_match_regex +from ...config import get_config_for_bucket, get_remote_task_id, TASK_ID_ENV_VAR, get_log_to_backend, \ + running_remotely, get_cache_dir, config_obj +from ...debugging import get_logger +from ...debugging.log import LoggerRoot +from ...storage import StorageHelper +from ...storage.helper import 
StorageError +from .access import AccessMixin +from .log import TaskHandler +from .repo import ScriptInfo +from ...config import config + +TaskStatusEnum = tasks.TaskStatusEnum + + +class TaskEntry(tasks.CreateRequest): + pass + + +class Task(IdObjectBase, AccessMixin, SetupUploadMixin): + """ Task manager providing task object access and management. Includes read/write access to task-associated + frames and models. + """ + + _anonymous_dataview_id = '__anonymous__' + + def __init__(self, session=None, task_id=None, log=None, project_name=None, + task_name=None, task_type=tasks.TaskTypeEnum.training, log_to_backend=True, + raise_on_validation_errors=True, force_create=False): + """ + Create a new task instance. + :param session: Optional API Session instance. If not provided, a default session based on the system's + configuration will be used. + :type session: Session + :param task_id: Optional task ID. If not provided, a new task will be created using the API + and its information reflected in the resulting instance. + :type task_id: string + :param log: Optional log to be used. If not provided, and internal log shared with all backend objects will be + used instead. + :type log: logging.Logger + :param project_name: Optional project name, used only if a new task is created. The new task will be associated + with a project by this name. If no such project exists, a new project will be created using the API. + :type project_name: str + :param task_name: Optional task name, used only if a new task is created. + :type project_name: str + :param task_type: Optional task type, used only if a new task is created. Default is custom task. + :type project_name: str (see tasks.TaskTypeEnum) + :param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API. + This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND. + :type log_to_backend: bool + :param force_create: If True a new task will always be created (task_id, if provided, will be ignored) + :type force_create: bool + """ + task_id = self._resolve_task_id(task_id, log=log) if not force_create else None + super(Task, self).__init__(id=task_id, session=session, log=log) + self._storage_uri = None + self._input_model = None + self._output_model = None + self._metrics_manager = None + self._reporter = None + self._curr_label_stats = {} + self._raise_on_validation_errors = raise_on_validation_errors + self._parameters_allowed_types = ( + six.string_types + six.integer_types + (six.text_type, float, list, dict, type(None)) + ) + + if not task_id: + # generate a new task + self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type) + else: + # this is an existing task, let's try to verify stuff + self._validate() + + if running_remotely() or DevWorker.report_stdout: + log_to_backend = False + self._log_to_backend = log_to_backend + self._setup_log(default_log_to_backend=log_to_backend) + + def _setup_log(self, default_log_to_backend=None, replace_existing=False): + """ + Setup logging facilities for this task. + :param default_log_to_backend: Should this task log to the backend. If not specified, value for this option + will be obtained from the environment, with this value acting as a default in case configuration for this is + missing. 
+ If the value for this option is false, we won't touch the current logger configuration regarding TaskHandler(s) + :param replace_existing: If True and another task is already logging to the backend, replace the handler with + a handler for this task. + """ + # Make sure urllib is never in debug/info, + disable_urllib3_info = config.get('log.disable_urllib3_info', True) + if disable_urllib3_info and logging.getLogger('urllib3').isEnabledFor(logging.INFO): + logging.getLogger('urllib3').setLevel(logging.WARNING) + + log_to_backend = get_log_to_backend(default=default_log_to_backend) or self._log_to_backend + if not log_to_backend: + return + + # Handle the root logger and our own logger. We use set() to make sure we create no duplicates + # in case these are the same logger... + loggers = {logging.getLogger(), LoggerRoot.get_base_logger()} + + # Find all TaskHandler handlers for these loggers + handlers = {logger: h for logger in loggers for h in logger.handlers if isinstance(h, TaskHandler)} + + if handlers and not replace_existing: + # Handlers exist and we shouldn't replace them + return + + # Remove all handlers, we'll add new ones + for logger, handler in handlers.items(): + logger.removeHandler(handler) + + # Create a handler that will be used in all loggers. Since our handler is a buffering handler, using more + # than one instance to report to the same task will result in out-of-order log reports (grouped by whichever + # handler instance handled them) + backend_handler = TaskHandler(self.session, self.task_id) + + # Add backend handler to both loggers: + # 1. to root logger root logger + # 2. to our own logger as well, since our logger is not propagated to the root logger + # (if we propagate our logger will be caught be the root handlers as well, and + # we do not want that) + for logger in loggers: + logger.addHandler(backend_handler) + + def _validate(self, check_output_dest_credentials=True): + raise_errors = self._raise_on_validation_errors + output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False) + if output_dest and check_output_dest_credentials: + try: + self.log.info('Validating output destination') + conf = get_config_for_bucket(base_url=output_dest) + if not conf: + msg = 'Failed resolving output destination (no credentials found for %s)' % output_dest + self.log.warn(msg) + if raise_errors: + raise Exception(msg) + else: + StorageHelper._test_bucket_config(conf=conf, log=self.log, raise_on_error=raise_errors) + except StorageError: + raise + except Exception as ex: + self.log.error('Failed trying to verify output destination: %s' % ex) + + @classmethod + def _resolve_task_id(cls, task_id, log=None): + if not task_id: + task_id = cls.normalize_id(get_remote_task_id()) + if task_id: + log = log or get_logger('task') + log.info('Using task ID from env %s=%s' % (TASK_ID_ENV_VAR[0], task_id)) + return task_id + + def _update_repository(self): + result = ScriptInfo.get(log=self.log) + for msg in result.warning_messages: + self.get_logger().console(msg) + + self.data.script = result.script + # Since we might run asynchronously, don't use self.data (lest someone else + # overwrite it before we have a chance to call edit) + self._edit(script=result.script) + + def _auto_generate(self, project_name=None, task_name=None, task_type=tasks.TaskTypeEnum.training): + created_msg = make_message('Auto-generated at %(time)s by %(user)s@%(host)s') + + project_id = None + if project_name: + project_id = get_or_create_project(self, project_name, created_msg) + + 
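+        # Tasks created by a local run (not by a worker) are tagged 'development' so they can be
+        # told apart from remotely executed tasks.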
tags = ['development'] if not running_remotely() else [] + + req = tasks.CreateRequest( + name=task_name or make_message('Anonymous task (%(user)s@%(host)s %(time)s)'), + type=task_type, + comment=created_msg, + project=project_id, + input={'view': {}}, + tags=tags, + ) + res = self.send(req) + + return res.response.id + + def _set_storage_uri(self, value): + value = value.rstrip('/') + self._storage_uri = StorageHelper.conform_url(value) + self.data.output.destination = self._storage_uri + self._edit(output_dest=self._storage_uri) + self.output_model.upload_storage_uri = self._storage_uri + + @property + def storage_uri(self): + if self._storage_uri: + return self._storage_uri + if running_remotely(): + return self.data.output.destination + else: + return None + + @storage_uri.setter + def storage_uri(self, value): + self._set_storage_uri(value) + + @property + def task_id(self): + return self.id + + @property + def name(self): + return self.data.name + + @property + def task_type(self): + return self.data.type + + @property + def project(self): + return self.data.project + + @property + def input_model_id(self): + return self.data.execution.model + + @property + def output_model_id(self): + return self.data.output.model + + @property + def comment(self): + return self.data.comment + + @property + def cache_dir(self): + """ Cache dir used to store task related files """ + return Path(get_cache_dir()) / self.id + + @property + def status(self): + """ The task's status. In order to stay updated, we always reload the task info when this value is accessed. """ + self.reload() + return self._status + + @property + def _status(self): + """ Return the task's cached status (don't reload if we don't have to) """ + return self.data.status + + @property + def input_model(self): + """ A model manager used to handle the input model object """ + model_id = self._get_task_property('execution.model', raise_on_error=False) + if not model_id: + return None + if self._input_model is None: + self._input_model = Model( + session=self.session, + model_id=model_id, + cache_dir=self.cache_dir, + log=self.log, + upload_storage_uri=None) + return self._input_model + + @property + def output_model(self): + """ A model manager used to manage the output model object """ + if self._output_model is None: + self._output_model = self._get_output_model(upload_required=True) + return self._output_model + + def create_output_model(self): + return self._get_output_model(upload_required=False, force=True) + + def _get_output_model(self, upload_required=True, force=False): + return Model( + session=self.session, + model_id=None if force else self._get_task_property( + 'output.model', raise_on_error=False, log_on_error=False), + cache_dir=self.cache_dir, + upload_storage_uri=self.storage_uri or self.get_output_destination( + raise_on_error=upload_required, log_on_error=upload_required), + upload_storage_suffix=self._get_output_destination_suffix('models'), + log=self.log) + + @property + def metrics_manager(self): + """ A metrics manager used to manage the metrics related to this task """ + return self._get_metrics_manager(self.get_output_destination()) + + @property + def reporter(self): + """ + Returns a simple metrics reporter instance + """ + if self._reporter is None: + try: + storage_uri = self.get_output_destination(log_on_error=False) + except ValueError: + storage_uri = None + self._reporter = Reporter(self._get_metrics_manager(storage_uri=storage_uri)) + return self._reporter + + def _get_metrics_manager(self, 
storage_uri): + if self._metrics_manager is None: + self._metrics_manager = Metrics( + session=self.session, + task_id=self.id, + storage_uri=storage_uri, + storage_uri_suffix=self._get_output_destination_suffix('metrics') + ) + return self._metrics_manager + + def _get_output_destination_suffix(self, extra_path=None): + return '/'.join(x for x in ('task_%s' % self.data.id, extra_path) if x) + + def _reload(self): + """ Reload the task object from the backend """ + res = self.send(tasks.GetByIdRequest(task=self.id)) + return res.response.task + + def reset(self, set_started_on_success=True): + """ Reset the task. Task will be reloaded following a successful reset. """ + self.send(tasks.ResetRequest(task=self.id)) + if set_started_on_success: + self.started() + self.reload() + + def started(self, ignore_errors=True): + """ Signal that this task has started """ + return self.send(tasks.StartedRequest(self.id), ignore_errors=ignore_errors) + + def stopped(self, ignore_errors=True): + """ Signal that this task has stopped """ + return self.send(tasks.StoppedRequest(self.id), ignore_errors=ignore_errors) + + def mark_failed(self, ignore_errors=True, status_reason=None, status_message=None): + """ Signal that this task has stopped """ + return self.send(tasks.FailedRequest(self.id, status_reason=status_reason, status_message=status_message), + ignore_errors=ignore_errors) + + def publish(self, ignore_errors=True): + """ Signal that this task will be published """ + if self.status != tasks.TaskStatusEnum.stopped: + raise ValueError("Can't publish, Task is not stopped") + resp = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors) + assert isinstance(resp.response, tasks.PublishResponse) + return resp + + def update_model_desc(self, new_model_desc_file=None): + """ Change the task's model_desc """ + execution = self._get_task_property('execution') + p = Path(new_model_desc_file) + if not p.is_file(): + raise IOError('mode_desc file %s cannot be found' % new_model_desc_file) + new_model_desc = p.read_text() + model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design' + execution.model_desc[model_desc_key] = new_model_desc + + res = self._edit(execution=execution) + return res.response + + def update_output_model(self, model_uri, name=None, comment=None, tags=None): + """ + Update the task's output model. + Note that this method only updates the model's metadata using the API and does not upload any data. Use this + method to update the output model when you have a local model URI (e.g. storing the weights file locally and + providing a file://path/to/file URI) + :param model_uri: URI for the updated model weights file + :type model_uri: str + :param name: Optional updated model name + :type name: str + :param comment: Optional updated model description + :type comment: str + :param tags: Optional updated model tags + :type tags: [str] + """ + self._conditionally_start_task() + self._get_output_model(upload_required=False).update_for_task(model_uri, self.id, name, comment, tags) + + def update_output_model_and_upload( + self, model_file, name=None, comment=None, tags=None, async_enable=False, cb=None, iteration=None): + """ + Update the task's output model weights file. 
File is first uploaded to the preconfigured output destination (see + task's output.destination property or call setup_upload()), than the model object associated with the task is + updated using an API call with the URI of the uploaded file (and other values provided by additional arguments) + :param model_file: Path to the updated model weights file + :type model_file: str + :param name: Optional updated model name + :type name: str + :param comment: Optional updated model description + :type comment: str + :param tags: Optional updated model tags + :type tags: [str] + :param async_enable: Request asynchronous upload. If False, the call blocks until upload is completed and the + API call updating the model returns. If True, the call returns immediately, while upload and update are + scheduled in another thread. Default is False. + :type async_enable: bool + :param cb: Asynchronous callback. If async=True, this callback will be invoked once the asynchronous upload and + update have completed. + :return: The URI of the uploaded weights file. If async=True, this is the expected URI as the upload is + probably still in progress. + """ + self._conditionally_start_task() + uri = self.output_model.update_for_task_and_upload( + model_file, self.id, name=name, comment=comment, tags=tags, async_enable=async_enable, cb=cb, + iteration=iteration + ) + return uri + + def _conditionally_start_task(self): + if self.status == TaskStatusEnum.created: + self.started() + + @property + def labels_stats(self): + """ Get accumulated label stats for the current/last frames iteration """ + return self._curr_label_stats + + def _accumulate_label_stats(self, roi_stats, reset=False): + if reset: + self._curr_label_stats = {} + for label in roi_stats: + if label in self._curr_label_stats: + self._curr_label_stats[label] += roi_stats[label] + else: + self._curr_label_stats[label] = roi_stats[label] + + def set_input_model(self, model_id=None, model_name=None, update_task_design=True, update_task_labels=True): + """ + Set a new input model for this task. Model must be 'ready' in order to be used as the Task's input model. + :param model_id: ID for a model that exists in the backend. Required if model_name is not provided. + :param model_name: Model name. Required if model_id is not provided. If provided, this name will be used to + locate an existing model in the backend. + :param update_task_design: if True, the task's model design will be copied from the input model + :param update_task_labels: if True, the task's label enumeration will be copied from the input model + """ + if model_id is None and not model_name: + raise ValueError('Expected one of [model_id, model_name]') + + if model_name: + # Try getting the model by name. Limit to 10 results. 
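+            # exact_match_regex() anchors the (escaped) name with ^...$ so only exact name matches
+            # are returned; get_single_result() warns (and by default raises) if more than one
+            # ready model matches.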
+ res = self.send( + models.GetAllRequest( + name=exact_match_regex(model_name), + ready=True, + page=0, + page_size=10, + order_by='-created', + only_fields=['id'] + ) + ) + model = get_single_result(entity='model', query=model_name, results=res.response.models, log=self.log) + model_id = model.id + + if model_id: + res = self.send(models.GetByIdRequest(model=model_id)) + model = res.response.model + if not model.ready: + # raise ValueError('Model %s is not published (not ready)' % model_id) + self.log.debug('Model %s [%s] is not published yet (not ready)' % (model_id, model.uri)) + else: + # clear the input model + model = None + model_id = '' + + # store model id + self.data.execution.model = model_id + + # Auto populate input field from model, if they are empty + if update_task_design and not self.data.execution.model_desc: + self.data.execution.model_desc = model.design if model else '' + if update_task_labels and not self.data.execution.model_labels: + self.data.execution.model_labels = model.labels if model else {} + + self._edit(execution=self.data.execution) + + def set_parameters(self, *args, **kwargs): + """ + Set parameters for this task. This allows setting a complete set of key/value parameters, but does not support + parameter descriptions (as the input is a dictionary or key/value pairs. + :param args: Positional arguments (one or more dictionary or (key, value) iterable). These will be merged into + a single key/value dictionary. + :param kwargs: Key/value pairs, merged into the parameters dictionary created from `args`. + """ + if not all(isinstance(x, (dict, collections.Iterable)) for x in args): + raise ValueError('only dict or iterable are supported as positional arguments') + + update = kwargs.pop('__update', False) + + parameters = dict() if not update else self.get_parameters() + parameters.update(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args)) + parameters.update(kwargs) + + not_allowed = { + k: type(v).__name__ + for k, v in parameters.items() + if not isinstance(v, self._parameters_allowed_types) + } + if not_allowed: + raise ValueError( + "Only builtin types ({}) are allowed for values (got {})".format( + ', '.join(t.__name__ for t in self._parameters_allowed_types), + ', '.join('%s=>%s' % p for p in not_allowed.items())), + ) + + # force cast all variables to strings (so that we can later edit them in UI) + parameters = {k: str(v) if v is not None else "" for k, v in parameters.items()} + + execution = self.data.execution + if execution is None: + execution = tasks.Execution(parameters=parameters) + else: + execution.parameters = parameters + self._edit(execution=execution) + + def set_parameter(self, name, value, description=None): + """ + Set a single task parameter. This overrides any previous value for this parameter. + :param name: Parameter name + :param value: Parameter value + :param description: Parameter description (unused for now) + """ + params = self.get_parameters() + params[name] = value + self.set_parameters(params) + + def get_parameter(self, name, default=None): + """ + Get a value for a parameter. + :param name: Parameter name + :param default: Default value + :return: Parameter value (or default value if parameter is not defined) + """ + params = self.get_parameters() + return params.get(name, default) + + def update_parameters(self, *args, **kwargs): + """ + Update parameters for this task. 
+ + This allows updating a complete set of key/value parameters,but does not support + parameter descriptions (as the input is a dictionary or key/value pairs. + + :param args: Positional arguments (one or more dictionary or (key, value) iterable). These will be merged into + a single key/value dictionary. + :param kwargs: Key/value pairs, merged into the parameters dictionary created from `args`. + """ + self.set_parameters(__update=True, *args, **kwargs) + + def set_model_label_enumeration(self, enumeration=None): + enumeration = enumeration or {} + execution = self.data.execution + if enumeration is None: + return + if not (isinstance(enumeration, dict) + and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())): + raise ValueError('Expected label to be a dict[str => int]') + execution.model_labels = enumeration + self._edit(execution=execution) + + def _set_model_design(self, design=None): + execution = self.data.execution + if design is not None: + execution.model_desc = Model._wrap_design(design) + + self._edit(execution=execution) + + def get_labels_enumeration(self): + """ + Return a dictionary of labels (text) to ids (integers) {str(label): integer(id)} + :return: + """ + if not self.data or not self.data.execution: + return {} + return self.data.execution.model_labels + + def get_model_design(self): + """ + Returns the model configuration as blob of text + :return: + """ + design = self._get_task_property("execution.model_desc", default={}, raise_on_error=False, log_on_error=False) + return Model._unwrap_design(design) + + def set_output_model_id(self, model_id): + self.data.output.model = str(model_id) + self._edit(output=self.data.output) + + def get_random_seed(self): + # fixed seed for the time being + return 1337 + + def set_random_seed(self, random_seed): + # fixed seed for the time being + pass + + def set_project(self, project_id): + assert isinstance(project_id, six.string_types) + self._set_task_property("project", project_id) + self._edit(project=project_id) + + def get_project_name(self): + if self.project is None: + return None + + res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False) + return res.response.project.name + + def get_tags(self): + return self._get_task_property("tags") + + def set_tags(self, tags): + assert isinstance(tags, (list, tuple)) + self._set_task_property("tags", tags) + self._edit(tags=self.data.tags) + + def _get_default_report_storage_uri(self): + app_host = self._get_app_server() + parsed = urlparse(app_host) + if parsed.port: + parsed = parsed._replace(netloc=parsed.netloc.replace(':%d' % parsed.port, ':8081')) + else: + parsed = parsed._replace(netloc=parsed.netloc+':8081') + return urlunparse(parsed) + + def _get_app_server(self): + host = config_obj.get('api.host') + if '://demoapi.' in host: + return host.replace('://demoapi.', '://demoapp.') + if '://api.' 
in host: + return host.replace('://api.', '://app.') + + parsed = urlparse(host) + if parsed.port == 8008: + return host.replace(':8008', ':8080') + + def _edit(self, **kwargs): + # Since we ae using forced update, make sure he task status is valid + if not self._data or (self.data.status not in (TaskStatusEnum.created, TaskStatusEnum.in_progress)): + raise ValueError('Task object can only be updated if created or in_progress') + + res = self.send(tasks.EditRequest(task=self.id, force=True, **kwargs), raise_on_errors=False) + return res + + @classmethod + def create_new_task(cls, session, task_entry, log=None): + """ + Create a new task + :param session: Session object used for sending requests to the API + :type session: Session + :param task_entry: A task entry instance + :type task_entry: TaskEntry + :param log: Optional log + :type log: logging.Logger + :return: A new Task instance + """ + if isinstance(task_entry, dict): + task_entry = TaskEntry(**task_entry) + + assert isinstance(task_entry, TaskEntry) + res = cls._send(session=session, req=task_entry, log=log) + return cls(session, task_id=res.response.id) + + @classmethod + def clone_task(cls, cloned_task_id, name, comment=None, execution_overrides=None, + tags=None, parent=None, project=None, log=None, session=None): + """ + Clone a task + :param session: Session object used for sending requests to the API + :type session: Session + :param cloned_task_id: Task ID for the task to be cloned + :type cloned_task_id: str + :param name: New for the new task + :type name: str + :param comment: Optional comment for the new task + :type comment: str + :param execution_overrides: Task execution overrides. Applied over the cloned task's execution + section, useful for overriding values in the cloned task. + :type execution_overrides: dict + :param tags: Optional updated model tags + :type tags: [str] + :param parent: Optional parent ID of the new task. + :type parent: str + :param project: Optional project ID of the new task. + If None, the new task will inherit the cloned task's project. + :type parent: str + :param log: Log object used by the infrastructure. + :type log: logging.Logger + :return: The new tasks's ID + """ + + session = session if session else cls._get_default_session() + + res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id)) + task = res.response.task + output_dest = None + if task.output: + output_dest = task.output.destination + execution = task.execution.to_dict() if task.execution else {} + execution = ConfigTree.merge_configs(ConfigFactory.from_dict(execution), + ConfigFactory.from_dict(execution_overrides or {})) + req = tasks.CreateRequest( + name=name, + type=task.type, + input=task.input, + tags=tags if tags is not None else task.tags, + comment=comment or task.comment, + parent=parent, + project=project if project else task.project, + output_dest=output_dest, + execution=execution.as_plain_ordered_dict(), + script=task.script + ) + res = cls._send(session=session, log=log, req=req) + return res.response.id + + @classmethod + def enqueue_task(cls, task_id, session=None, queue_id=None, log=None): + """ + Enqueue a task for execution + :param session: Session object used for sending requests to the API + :type session: Session + :param task_id: ID of the task to be enqueued + :type task_id: str + :param queue_id: ID of the queue in which to enqueue the task. If not provided, the default queue will be used. 
+ :type queue_id: str + :param log: Log object + :type log: logging.Logger + :return: enqueue response + """ + assert isinstance(task_id, six.string_types) + req = tasks.EnqueueRequest(task=task_id, queue=queue_id) + res = cls._send(session=session, req=req, log=log) + resp = res.response + return resp + + @classmethod + def get_all(cls, session, log=None, **kwargs): + """ + Get all tasks + :param session: Session object used for sending requests to the API + :type session: Session + :param log: Log object + :type log: logging.Logger + :param kwargs: Keyword args passed to the GetAllRequest (see .backend_api.services.tasks.GetAllRequest) + :type kwargs: dict + :return: API response + """ + req = tasks.GetAllRequest(**kwargs) + res = cls._send(session=session, req=req, log=log) + return res + + @classmethod + def get_by_name(cls, task_name): + res = cls._send(cls._get_default_session(), tasks.GetAllRequest(name=exact_match_regex(task_name))) + + task = get_single_result(entity='task', query=task_name, results=res.response.tasks) + return cls(task_id=task.id) + + def _get_all_events(self, max_events=100): + """ + Get a list of all reported events. + + Warning: Debug only. Do not use outside of testing. + + :param max_events: The maximum events the function will return. Pass None + to return all the reported events. + :return: A list of events from the task. + """ + + log_events = self.send(events.GetTaskEventsRequest( + task=self.id, + order='asc', + batch_size=max_events, + )) + + events_list = log_events.response.events + total_events = log_events.response.total + scroll = log_events.response.scroll_id + + while len(events_list) < total_events and (max_events is None or len(events_list) < max_events): + log_events = self.send(events.GetTaskEventsRequest( + task=self.id, + order='asc', + batch_size=max_events, + scroll_id=scroll, + )) + events_list.extend(log_events.response.events) + scroll = log_events.response.scroll_id + + return events_list diff --git a/trains/backend_interface/util.py b/trains/backend_interface/util.py new file mode 100644 index 00000000..81b938f7 --- /dev/null +++ b/trains/backend_interface/util.py @@ -0,0 +1,77 @@ +import getpass +import re +from _socket import gethostname +from datetime import datetime + +from ..backend_api.services import projects +from ..debugging.log import get_logger + + +def make_message(s, **kwargs): + args = dict( + user=getpass.getuser(), + host=gethostname(), + time=datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + ) + args.update(kwargs) + return s % args + + +def get_or_create_project(session, project_name, description=None): + res = session.send(projects.GetAllRequest(name=exact_match_regex(project_name))) + if res.response.projects: + return res.response.projects[0].id + res = session.send(projects.CreateRequest(name=project_name, description=description)) + return res.response.id + + +def get_single_result(entity, query, results, log=None, show_results=10, raise_on_error=True): + if not results: + if not raise_on_error: + return None + + raise ValueError('No {entity}s found when searching for `{query}`'.format(**locals())) + + if not log: + log = get_logger() + + if len(results) > 1: + log.warn('More than one {entity} found when searching for `{query}`' + ' (showing first {show_results} {entity}s follow)'.format(**locals())) + for obj in (o if isinstance(o, dict) else o.to_dict() for o in results[:show_results]): + log.warn('Found {entity} `{obj[name]}` (id={obj[id]})'.format(**locals())) + + if raise_on_error: + raise 
ValueError('More than one {entity}s found when searching for ``{query}`'.format(**locals())) + + return results[0] + + +def at_least_one(_exception_cls=Exception, **kwargs): + actual = [k for k, v in kwargs.items() if v] + if len(actual) < 1: + raise _exception_cls('At least one of (%s) is required' % ', '.join(kwargs.keys())) + + +def mutually_exclusive(_exception_cls=Exception, _require_at_least_one=True, **kwargs): + """ Helper for checking mutually exclusive options """ + actual = [k for k, v in kwargs.items() if v] + if _require_at_least_one: + at_least_one(_exception_cls=_exception_cls, **kwargs) + if len(actual) > 1: + raise _exception_cls('Only one of (%s) is allowed' % ', '.join(kwargs.keys())) + + +def validate_dict(obj, key_types, value_types, desc=''): + if not isinstance(obj, dict): + raise ValueError('%sexpected a dictionary' % ('%s: ' % desc if desc else '')) + if not all(isinstance(l, key_types) for l in obj.keys()): + raise ValueError('%skeys must all be strings' % ('%s ' % desc if desc else '')) + if not all(isinstance(l, value_types) for l in obj.values()): + raise ValueError('%svalues must all be integers' % ('%s ' % desc if desc else '')) + + +def exact_match_regex(name): + """ Convert string to a regex representing an exact match """ + return '^%s$' % re.escape(name) + diff --git a/trains/config/__init__.py b/trains/config/__init__.py new file mode 100644 index 00000000..edba8a74 --- /dev/null +++ b/trains/config/__init__.py @@ -0,0 +1,64 @@ +""" Configuration module. Uses backend_config to load system configuration. """ +import logging +from os.path import expandvars, expanduser + +from ..backend_api import load_config +from ..backend_config.bucket_config import S3BucketConfigurations + +from .defs import * +from .remote import running_remotely_task_id as _running_remotely_task_id + +config_obj = load_config(Path(__file__).parent) +config_obj.initialize_logging() +config = config_obj.get("sdk") +""" Configuration object reflecting the merged SDK section of all available configuration files """ + + +def get_cache_dir(): + cache_base_dir = Path( + expandvars( + expanduser( + config.get("storage.cache.default_base_dir") or DEFAULT_CACHE_DIR + ) + ) + ) + return cache_base_dir + + +def get_config_for_bucket(base_url, extra_configurations=None): + config_list = S3BucketConfigurations.from_config(config.get("aws.s3")) + + for configuration in extra_configurations or []: + config_list.add_config(configuration) + + return config_list.get_config_by_uri(base_url) + + +def get_remote_task_id(): + return None + + +def running_remotely(): + return False + + +def get_log_to_backend(default=None): + return LOG_TO_BACKEND_ENV_VAR.get(default=default) + + +def get_node_id(default=0): + return NODE_ID_ENV_VAR.get(default=default) + + +def get_log_redirect_level(): + """ Returns which log level (and up) should be redirected to stderr. None means no redirection. """ + value = LOG_STDERR_REDIRECT_LEVEL.get() + try: + if value: + return logging._checkLevel(value) + except (ValueError, TypeError): + pass + + +def dev_worker_name(): + return DEV_WORKER_NAME.get() diff --git a/trains/config/cache.py b/trains/config/cache.py new file mode 100644 index 00000000..ffec70e7 --- /dev/null +++ b/trains/config/cache.py @@ -0,0 +1,40 @@ +import json +from . import get_cache_dir +from .defs import SESSION_CACHE_FILE + + +class SessionCache(object): + """ + Handle SDK session cache. 
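+ + Entries are small dictionaries stored as JSON in the SDK cache directory (get_cache_dir() / SESSION_CACHE_FILE).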
+ TODO: Improve error handling to something like "except (FileNotFoundError, PermissionError, JSONDecodeError)" + TODO: that's both six-compatible and tested + """ + @classmethod + def _load_cache(cls): + try: + with (get_cache_dir() / SESSION_CACHE_FILE).open("rt") as fp: + return json.load(fp) + except Exception: + return {} + + @classmethod + def _store_cache(cls, cache): + try: + get_cache_dir().mkdir(parents=True, exist_ok=True) + with (get_cache_dir() / SESSION_CACHE_FILE).open("wt") as fp: + json.dump(cache, fp) + except Exception: + pass + + @classmethod + def store_dict(cls, unique_cache_name, dict_object): + # type: (str, dict) -> None + cache = cls._load_cache() + cache[unique_cache_name] = dict_object + cls._store_cache(cache) + + @classmethod + def load_dict(cls, unique_cache_name): + # type: (str) -> dict + cache = cls._load_cache() + return cache.get(unique_cache_name, {}) if cache else {} diff --git a/trains/config/default/__init__.py b/trains/config/default/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/trains/config/default/__init__.py @@ -0,0 +1 @@ + diff --git a/trains/config/default/__main__.py b/trains/config/default/__main__.py new file mode 100644 index 00000000..83315b18 --- /dev/null +++ b/trains/config/default/__main__.py @@ -0,0 +1,132 @@ +from pyhocon import ConfigFactory +from pathlib2 import Path +from six.moves.urllib.parse import urlparse, urlunparse + +from trains.backend_api.session.defs import ENV_HOST +from trains.backend_config.defs import LOCAL_CONFIG_FILES +from trains.config import config_obj + + +description = """ +Please create new key/secrete credentials using {}/admin + +Copy/Paste credentials here: """ + +try: + def_host = ENV_HOST.get(default=config_obj.get("api.host")) +except Exception: + def_host = 'http://localhost:8080' + +host_description = """ +Editing configuration file: {CONFIG_FILE} +Enter your trains-server host [{HOST}]: """.format( + CONFIG_FILE=LOCAL_CONFIG_FILES[0], + HOST=def_host, +) + + +def main(): + print('TRAINS SDK setup process') + conf_file = Path(LOCAL_CONFIG_FILES[0]).absolute() + if conf_file.exists() and conf_file.is_file() and conf_file.stat().st_size > 0: + print('Configuration file already exists: {}'.format(str(conf_file))) + print('Leaving setup, feel free to edit the configuration file.') + return + + print(host_description, end='') + parsed_host = None + while not parsed_host: + parse_input = input() + if not parse_input: + parse_input = def_host + try: + parsed_host = urlparse(parse_input) + if parsed_host.scheme not in ('http', 'https'): + parsed_host = None + except Exception: + parsed_host = None + print('Could not parse url {}\nEnter your trains-server host: '.format(parse_input), end='') + + if parsed_host.port == 8080: + # this is a docker 8080 is the web address, we need the api address, it is 8008 + print('Port 8080 is the web port, we need the api port. Replacing 8080 with 8008') + api_host = parsed_host.scheme + "://" + parsed_host.netloc.replace(':8080', ':8008') + parsed_host.path + web_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path + elif parsed_host.netloc.startswith('demoapp.'): + print('{} is the web server, we need the api server. 
Replacing \'demoapp.\' with \'demoapi.\''.format( + parsed_host.netloc)) + # this is our demo server + api_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('demoapp.', 'demoapi.') + parsed_host.path + web_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path + elif parsed_host.netloc.startswith('app.'): + print('{} is the web server, we need the api server. Replacing \'app.\' with \'api.\''.format( + parsed_host.netloc)) + # this is our application server + api_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('app.', 'api.') + parsed_host.path + web_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path + elif parsed_host.port == 8008: + api_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path + web_host = parsed_host.scheme + "://" + parsed_host.netloc.replace(':8008', ':8080') + parsed_host.path + elif parsed_host.netloc.startswith('demoapi.'): + api_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path + web_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('demoapi.', 'demoapp.') + parsed_host.path + elif parsed_host.netloc.startswith('api.'): + api_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path + web_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('api.', 'app.') + parsed_host.path + else: + api_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path + web_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path + + print('Host configured to: {}'.format(api_host)) + + print(description.format(web_host), end='') + parse_input = input() + # check if these are valid credentials + credentials = None + try: + parsed = ConfigFactory.parse_string(parse_input) + if parsed: + credentials = parsed.get("credentials", None) + except Exception: + credentials = None + + if not credentials or set(credentials) != {"access_key", "secret_key"}: + print('Could not parse user credentials, try again one after the other.') + credentials = {} + # parse individual + print('Enter user access key: ', end='') + credentials['access_key'] = input() + print('Enter user secret: ', end='') + credentials['secret_key'] = input() + + print('Detected credentials key=\"{}\" secret=\"{}\"'.format(credentials['access_key'], + credentials['secret_key'], )) + + try: + default_sdk_conf = Path(__file__).parent.absolute() / 'sdk.conf' + with open(str(default_sdk_conf), 'rt') as f: + default_sdk = f.read() + except Exception: + print('Error! Could not read default configuration file') + return + + try: + with open(str(conf_file), 'wt') as f: + header = '# TRAINS SDK configuration file\n' \ + 'api {\n' \ + ' host: %s\n' \ + ' credentials {"access_key": "%s", "secret_key": "%s"}\n' \ + '}\n' \ + 'sdk ' % (api_host, credentials['access_key'], credentials['secret_key']) + f.write(header) + f.write(default_sdk) + except Exception: + print('Error! 
Could not write configuration file at: {}'.format(str(conf_file))) + return + + print('\nNew configuration stored in {}'.format(str(conf_file))) + print('TRAINS setup completed successfully.') + + +if __name__ == '__main__': + main() diff --git a/trains/config/default/logging.conf b/trains/config/default/logging.conf new file mode 100644 index 00000000..6b3cb071 --- /dev/null +++ b/trains/config/default/logging.conf @@ -0,0 +1,27 @@ +{ + version: 1 + disable_existing_loggers: 0 + loggers { + trains { + level: INFO + } + boto { + level: WARNING + } + "boto.perf" { + level: WARNING + } + botocore { + level: WARNING + } + boto3 { + level: WARNING + } + google { + level: WARNING + } + urllib3 { + level: WARNING + } + } +} \ No newline at end of file diff --git a/trains/config/default/sdk.conf b/trains/config/default/sdk.conf new file mode 100644 index 00000000..cec8ece8 --- /dev/null +++ b/trains/config/default/sdk.conf @@ -0,0 +1,126 @@ +{ + # TRAINS - default SDK configuration + + storage { + cache { + # Defaults to system temp folder / cache + default_base_dir: "~/.trains/cache" + } + } + + metrics { + # History size for debug files per metric/variant. For each metric/variant combination with an attached file + # (e.g. debug image event), file names for the uploaded files will be recycled in such a way that no more than + # X files are stored in the upload destination for each metric/variant combination. + file_history_size: 100 + + # Settings for generated debug images + images { + format: JPEG + quality: 87 + subsampling: 0 + } + } + + network { + metrics { + # Number of threads allocated to uploading files (typically debug images) when transmitting metrics for + # a specific iteration + file_upload_threads: 4 + + # Warn about upload starvation if no uploads were made in specified period while file-bearing events keep + # being sent for upload + file_upload_starvation_warning_sec: 120 + } + + iteration { + # Max number of retries when getting frames if the server returned an error (http code 500) + max_retries_on_server_error: 5 + # Backoff factory for consecutive retry attempts. + # SDK will wait for {backoff factor} * (2 ^ ({number of total retries} - 1)) between retries. 
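+ # e.g. with the default factor of 10, consecutive retries wait 10, 20, 40, ... seconds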
+ retry_backoff_factor_sec: 10 + } + } + aws { + s3 { + # S3 credentials, used for read/write access by various SDK elements + + # default, used for any bucket not specified below + key: "" + secret: "" + region: "" + + credentials: [ + # specifies key/secret credentials to use when handling s3 urls (read or write) + # { + # bucket: "my-bucket-name" + # key: "my-access-key" + # secret: "my-secret-key" + # }, + # { + # # This will apply to all buckets in this host (unless key/value is specifically provided for a given bucket) + # host: "my-minio-host:9000" + # key: "12345678" + # secret: "12345678" + # multipart: false + # secure: false + # } + ] + } + boto3 { + pool_connections: 512 + max_multipart_concurrency: 16 + } + } + google.storage { + # # Default project and credentials file + # # Will be used when no bucket configuration is found + # project: "trains" + # credentials_json: "/path/to/credentials.json" + + # # Specific credentials per bucket and sub directory + # credentials = [ + # { + # bucket: "my-bucket" + # subdir: "path/in/bucket" # Not required + # project: "trains" + # credentials_json: "/path/to/credentials.json" + # }, + # ] + } + + log { + # debugging feature: set this to true to make null log propagate messages to root logger (so they appear in stdout) + null_log_propagate: False + task_log_buffer_capacity: 66 + + # disable urllib info and lower levels + disable_urllib3_info: True + } + + development { + # Development-mode options + + # dev task reuse window + task_reuse_time_window_in_hours: 72.0 + + # Run VCS repository detection asynchronously + vcs_repo_detect_async: False + + # Store uncommitted git/hg source code diff in experiment manifest when training in development mode + # This stores "git diff" or "hg diff" into the experiment's "script.requirements.diff" section + store_uncommitted_code_diff_on_train: True + + # Support stopping an experiment in case it was externally stopped, status was changed or task was reset + support_stopping: True + + # Development mode worker + worker { + # Status report period in seconds + report_period_sec: 2 + + # Log all stdout & stderr + log_stdout: True + } + } +} diff --git a/trains/config/defs.py b/trains/config/defs.py new file mode 100644 index 00000000..49de1dc0 --- /dev/null +++ b/trains/config/defs.py @@ -0,0 +1,31 @@ +import tempfile + +from ..backend_config import EnvEntry +from ..backend_config.converters import base64_to_text, or_ +from pathlib2 import Path + +SESSION_CACHE_FILE = ".session.json" +DEFAULT_CACHE_DIR = str(Path(tempfile.gettempdir()) / "trains_cache") + + +TASK_ID_ENV_VAR = EnvEntry("TRAINS_TASK_ID", "ALG_TASK_ID") +LOG_TO_BACKEND_ENV_VAR = EnvEntry("TRAINS_LOG_TASK_TO_BACKEND", "ALG_LOG_TASK_TO_BACKEND", type=bool) +NODE_ID_ENV_VAR = EnvEntry("TRAINS_NODE_ID", "ALG_NODE_ID", type=int) +PROC_MASTER_ID_ENV_VAR = EnvEntry("TRAINS_PROC_MASTER_ID", "ALG_PROC_MASTER_ID", type=int) +LOG_STDERR_REDIRECT_LEVEL = EnvEntry("TRAINS_LOG_STDERR_REDIRECT_LEVEL", "ALG_LOG_STDERR_REDIRECT_LEVEL") +DEV_WORKER_NAME = EnvEntry("TRAINS_WORKER_NAME", "ALG_WORKER_NAME") + +LOG_LEVEL_ENV_VAR = EnvEntry("TRAINS_LOG_LEVEL", "ALG_LOG_LEVEL", converter=or_(int, str)) + +# Repository detection +VCS_REPO_TYPE = EnvEntry("TRAINS_VCS_REPO_TYPE", "ALG_VCS_REPO_TYPE", default="git") +VCS_REPOSITORY_URL = EnvEntry("TRAINS_VCS_REPO_URL", "ALG_VCS_REPO_URL") +VCS_COMMIT_ID = EnvEntry("TRAINS_VCS_COMMIT_ID", "ALG_VCS_COMMIT_ID") +VCS_BRANCH = EnvEntry("TRAINS_VCS_BRANCH", "ALG_VCS_BRANCH") +VCS_ROOT = EnvEntry("TRAINS_VCS_ROOT", 
"ALG_VCS_ROOT") +VCS_STATUS = EnvEntry("TRAINS_VCS_STATUS", "ALG_VCS_STATUS", converter=base64_to_text) +VCS_DIFF = EnvEntry("TRAINS_VCS_DIFF", "ALG_VCS_DIFF", converter=base64_to_text) + +# User credentials +API_ACCESS_KEY = EnvEntry("TRAINS_API_ACCESS_KEY", "ALG_API_ACCESS_KEY", help="API Access Key") +API_SECRET_KEY = EnvEntry("TRAINS_API_SECRET_KEY", "ALG_API_SECRET_KEY", help="API Secret Key") diff --git a/trains/config/remote.py b/trains/config/remote.py new file mode 100644 index 00000000..1214dcae --- /dev/null +++ b/trains/config/remote.py @@ -0,0 +1,17 @@ +from .defs import TASK_ID_ENV_VAR + +running_remotely_task_id = TASK_ID_ENV_VAR.get() + + +def override_current_task_id(task_id): + """ + Overrides the current task id to simulate remote running with a specific task. + + Use for testing and debug only. + + :param task_id: The task's id to use as the remote task. + Pass None to simulate local execution. + """ + + global running_remotely_task_id + running_remotely_task_id = task_id diff --git a/trains/debugging/__init__.py b/trains/debugging/__init__.py new file mode 100644 index 00000000..fca7ba2f --- /dev/null +++ b/trains/debugging/__init__.py @@ -0,0 +1,4 @@ +""" Debugging module """ +from .timer import Timer +from .log import get_logger, get_null_logger, TqdmLog, add_options as add_log_options, \ + apply_args as parse_log_args, add_rotating_file_handler, add_time_rotating_file_handler diff --git a/trains/debugging/log.py b/trains/debugging/log.py new file mode 100644 index 00000000..89651dc0 --- /dev/null +++ b/trains/debugging/log.py @@ -0,0 +1,181 @@ +""" Logging convenience functions and wrappers """ +import inspect +import logging +import logging.handlers +import os +import sys +from platform import system + +import colorama +from ..config import config, get_log_redirect_level +from coloredlogs import ColoredFormatter +from pathlib2 import Path +from six import BytesIO +from tqdm import tqdm + +default_level = logging.INFO + + +class _LevelRangeFilter(logging.Filter): + + def __init__(self, min_level, max_level, name=''): + super(_LevelRangeFilter, self).__init__(name) + self.min_level = min_level + self.max_level = max_level + + def filter(self, record): + return self.min_level <= record.levelno <= self.max_level + + +class LoggerRoot(object): + __base_logger = None + + @classmethod + def _make_stream_handler(cls, level=None, stream=sys.stdout, colored=False): + ch = logging.StreamHandler(stream=stream) + ch.setLevel(level) + if colored: + colorama.init() + formatter = ColoredFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + else: + formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + ch.setFormatter(formatter) + return ch + + @classmethod + def get_base_logger(cls, level=None, stream=sys.stdout, colored=False): + if LoggerRoot.__base_logger: + return LoggerRoot.__base_logger + LoggerRoot.__base_logger = logging.getLogger('trains') + level = level if level is not None else default_level + LoggerRoot.__base_logger.setLevel(level) + + redirect_level = get_log_redirect_level() + + # Do not redirect to stderr if the target stream is already stderr + if redirect_level is not None and stream not in (None, sys.stderr): + # Adjust redirect level in case requested level is higher (e.g. 
logger is requested for CRITICAL + # and redirect is set for ERROR, in which case we redirect from CRITICAL) + redirect_level = max(level, redirect_level) + LoggerRoot.__base_logger.addHandler( + cls._make_stream_handler(redirect_level, sys.stderr, colored) + ) + + if level < redirect_level: + # Not all levels were redirected, remaining should be sent to requested stream + handler = cls._make_stream_handler(level, stream, colored) + handler.addFilter(_LevelRangeFilter(min_level=level, max_level=redirect_level - 1)) + LoggerRoot.__base_logger.addHandler(handler) + else: + LoggerRoot.__base_logger.addHandler( + cls._make_stream_handler(level, stream, colored) + ) + + LoggerRoot.__base_logger.propagate = False + return LoggerRoot.__base_logger + + @classmethod + def flush(cls): + if LoggerRoot.__base_logger: + for h in LoggerRoot.__base_logger.handlers: + h.flush() + + +def add_options(parser): + """ Add logging options to an argparse.ArgumentParser object """ + level = logging.getLevelName(default_level) + parser.add_argument( + '--log-level', '-l', default=level, help='Log level (default is %s)' % level) + + +def apply_args(args): + """ Apply logging args from an argparse.ArgumentParser parsed args """ + global default_level + default_level = logging.getLevelName(args.log_level.upper()) + + +def get_logger(path=None, level=None, stream=None, colored=False): + """ Get a python logging object named using the provided filename and preconfigured with a color-formatted + stream handler + """ + path = path or os.path.abspath((inspect.stack()[1])[1]) + root_log = LoggerRoot.get_base_logger(level=default_level, stream=sys.stdout, colored=colored) + log = root_log.getChild(Path(path).stem) + level = level if level is not None else root_log.level + log.setLevel(level) + if stream: + ch = logging.StreamHandler(stream=stream) + ch.setLevel(level) + log.propagate = True + return log + + +def _add_file_handler(logger, log_dir, fh, formatter=None): + """ Adds a file handler to a logger """ + Path(log_dir).mkdir(parents=True, exist_ok=True) + if not formatter: + log_format = '%(asctime)s %(name)s x_x[%(levelname)s] %(message)s' + formatter = logging.Formatter(log_format) + fh.setFormatter(formatter) + logger.addHandler(fh) + + +def add_rotating_file_handler(logger, log_dir, log_file_prefix, max_bytes=10 * 1024 * 1024, backup_count=20, + formatter=None): + """ Create and add a rotating file handler to a logger """ + fh = logging.handlers.RotatingFileHandler( + str(Path(log_dir) / ('%s.log' % log_file_prefix)), maxBytes=max_bytes, backupCount=backup_count) + _add_file_handler(logger, log_dir, fh, formatter) + + +def add_time_rotating_file_handler(logger, log_dir, log_file_prefix, when='midnight', formatter=None): + """ + Create and add a time rotating file handler to a logger. 
+ Possible values for when are 'midnight', weekdays ('w0'-'W6', when 0 is Monday), and 's', 'm', 'h' amd 'd' for + seconds, minutes, hours and days respectively (case-insensitive) + """ + fh = logging.handlers.TimedRotatingFileHandler( + str(Path(log_dir) / ('%s.log' % log_file_prefix)), when=when) + _add_file_handler(logger, log_dir, fh, formatter) + + +def get_null_logger(name=None): + """ Get a logger with a null handler """ + log = logging.getLogger(name if name else 'null') + if not log.handlers: + log.addHandler(logging.NullHandler()) + log.propagate = config.get("log.null_log_propagate", False) + return log + + +class TqdmLog(object): + """ Tqdm (progressbar) wrapped logging class """ + + class _TqdmIO(BytesIO): + """ IO wrapper class for Tqdm """ + + def __init__(self, level=20, logger=None, *args, **kwargs): + self._log = logger or get_null_logger() + self._level = level + BytesIO.__init__(self, *args, **kwargs) + + def write(self, buf): + self._buf = buf.strip('\r\n\t ') + + def flush(self): + self._log.log(self._level, self._buf) + + def __init__(self, total, desc='', log_level=20, ascii=False, logger=None, smoothing=0, mininterval=5, initial=0): + self._io = self._TqdmIO(level=log_level, logger=logger) + self._tqdm = tqdm(total=total, desc=desc, file=self._io, ascii=ascii if not system() == 'Windows' else True, + smoothing=smoothing, + mininterval=mininterval, initial=initial) + + def update(self, n=None): + if n is not None: + self._tqdm.update(n=n) + else: + self._tqdm.update() + + def close(self): + self._tqdm.close() diff --git a/trains/debugging/timer.py b/trains/debugging/timer.py new file mode 100644 index 00000000..c07eae12 --- /dev/null +++ b/trains/debugging/timer.py @@ -0,0 +1,112 @@ +""" Timing support """ +import sys +import time + +import six + + +class Timer(object): + """A class implementing a simple timer, with a reset option """ + + def __init__(self): + self._start_time = 0. + self._diff = 0. + self._total_time = 0. + self._average_time = 0. + self._calls = 0 + self.tic() + + def reset(self): + self._start_time = 0. + self._diff = 0. 
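+ # reset also clears the accumulated total, call count and running average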
+ self.reset_average() + + def reset_average(self): + """ Reset average counters (does not change current timer) """ + self._total_time = 0 + self._average_time = 0 + self._calls = 0 + + def tic(self): + try: + # using time.time instead of time.clock because time time.clock + # does not normalize for multi threading + self._start_time = time.time() + except Exception: + pass + + def toc(self, average=True): + self._diff = time.time() - self._start_time + self._total_time += self._diff + self._calls += 1 + self._average_time = self._total_time / self._calls + if average: + return self._average_time + else: + return self._diff + + @property + def average_time(self): + return self._average_time + + @property + def total_time(self): + return self._total_time + + def toc_with_reset(self, average=True, reset_if_calls=1000): + """ Enable toc with reset (slightly inaccurate if reset event occurs) """ + if self._calls > reset_if_calls: + last_diff = time.time() - self._start_time + self._start_time = time.time() + self._total_time = last_diff + self._average_time = 0 + self._calls = 0 + + return self.toc(average=average) + + +class TimersMixin(object): + def __init__(self): + self._timers = {} + + def add_timers(self, *names): + for name in names: + self.add_timer(name) + + def add_timer(self, name, timer=None): + if name in self._timers: + raise ValueError('timer %s already exists' % name) + timer = timer or Timer() + self._timers[name] = timer + return timer + + def get_timer(self, name, default=None): + return self._timers.get(name, default) + + def get_timers(self): + return self._timers + + def _call_timer(self, name, callable, silent_fail=False): + try: + return callable(self._timers[name]) + except KeyError: + if not silent_fail: + six.reraise(*sys.exc_info()) + + def reset_timers(self, *names): + for name in names: + self._call_timer(name, lambda t: t.reset()) + + def reset_average_timers(self, *names): + for name in names: + self._call_timer(name, lambda t: t.reset_average()) + + def tic_timers(self, *names): + for name in names: + self._call_timer(name, lambda t: t.tic()) + + def toc_timers(self, *names): + return [self._call_timer(name, lambda t: t.toc()) for name in names] + + def toc_with_reset_timer(self, name, average=True, reset_if_calls=1000): + return self._call_timer(name, lambda t: t.toc_with_reset(average, reset_if_calls)) diff --git a/trains/errors.py b/trains/errors.py new file mode 100644 index 00000000..f3ef762d --- /dev/null +++ b/trains/errors.py @@ -0,0 +1,3 @@ +class UsageError(RuntimeError): + """ An exception raised for illegal usage of trains objects""" + pass diff --git a/trains/logger.py b/trains/logger.py new file mode 100644 index 00000000..629e5943 --- /dev/null +++ b/trains/logger.py @@ -0,0 +1,684 @@ +import logging +import re +import sys +import threading +from functools import wraps + +import numpy as np +from pathlib2 import Path + +from .debugging.log import LoggerRoot +from .backend_interface.task.development.worker import DevWorker +from .backend_interface.task.log import TaskHandler +from .storage import StorageHelper +from .utilities.plotly import SeriesInfo +from .backend_interface import TaskStatusEnum +from .backend_interface.task import Task as _Task +from .config import running_remotely, get_cache_dir + + +def _safe_names(func): + """ + Validate the form of title and series parameters. + + This decorator assert that a method receives 'title' and 'series' as its + first positional arguments, and that their values have only legal characters. 
+ + '\', '/' and ':' will be replaced automatically by '_' + Whitespace chars will be replaced automatically by ' ' + """ + _replacements = { + '_': re.compile(r"[/\\:]"), + ' ': re.compile(r"[\s]"), + } + + def _make_safe(value): + for repl, regex in _replacements.items(): + value = regex.sub(repl, value) + return value + + @wraps(func) + def fixed_names(self, title, series, *args, **kwargs): + title = _make_safe(title) + series = _make_safe(series) + + func(self, title, series, *args, **kwargs) + + return fixed_names + + +class Logger(object): + """ + Console log and metric statistics interface. + + This is how we send graphs/plots/text to the system, later we can compare the performance of different tasks. + + **Usage: Task.get_logger()** + """ + SeriesInfo = SeriesInfo + _stdout_proxy = None + _stderr_proxy = None + _stdout_original_write = None + + def __init__(self, private_task): + """ + **Do not construct Logger manually!** + + please use Task.get_logger() + """ + assert isinstance(private_task, _Task), \ + 'Logger object cannot be instantiated externally, use Task.get_logger()' + super(Logger, self).__init__() + self._task = private_task + self._default_upload_destination = None + self._flusher = None + self._report_worker = None + self._task_handler = None + + if DevWorker.report_stdout and not PrintPatchLogger.patched: + Logger._stdout_proxy = PrintPatchLogger(sys.stdout, self, level=logging.INFO) + Logger._stderr_proxy = PrintPatchLogger(sys.stderr, self, level=logging.ERROR) + self._task_handler = TaskHandler(self._task.session, self._task.id, capacity=100) + # noinspection PyBroadException + try: + Logger._stdout_original_write = sys.stdout.write + # this will only work in python 3, but we still better guard it with try/catch + sys.stdout._original_write = sys.stdout.write + sys.stdout.write = stdout__patched__write__ + sys.stderr._original_write = sys.stderr.write + sys.stderr.write = stderr__patched__write__ + except Exception: + pass + sys.stdout = Logger._stdout_proxy + sys.stderr = Logger._stderr_proxy + + def console(self, msg, level=logging.INFO, omit_console=False, *args, **kwargs): + """ + print text to log (same as print to console, and also prints to console) + + :param msg: text to print to the console (always send to the backend and displayed in console) + :param level: logging level, default: logging.INFO + :param omit_console: If True we only send 'msg' to log (no console print) + """ + try: + level = int(level) + except (TypeError, ValueError): + self._task.log.log(level=logging.ERROR, + msg='Logger failed casting log level "%s" to integer' % str(level)) + level = logging.INFO + + try: + record = self._task.log.makeRecord( + "console", level=level, fn='', lno=0, func='', msg=msg, args=args, exc_info=None + ) + # find the task handler + if not self._task_handler: + self._task_handler = [h for h in LoggerRoot.get_base_logger().handlers if isinstance(h, TaskHandler)][0] + self._task_handler.emit(record) + except Exception: + self._task.log.log(level=logging.ERROR, + msg='Logger failed sending log: [level %s]: "%s"' % (str(level), str(msg))) + + if not omit_console: + # if we are here and we grabbed the stdout, we need to print the real thing + if DevWorker.report_stdout: + try: + # make sure we are writing to the original stdout + Logger._stdout_original_write(str(msg)+'\n') + except Exception: + pass + else: + print(str(msg)) + + # if task was not started, we have to start it + self._start_task_if_needed() + + def report_text(self, msg, level=logging.INFO, 
print_console=False, *args, **_): + return self.console(msg, level, not print_console, *args, **_) + + def debug(self, msg, *args, **kwargs): + """ Print information to the log. This is the same as console(msg, logging.DEBUG) """ + self._task.log.log(msg=msg, level=logging.DEBUG, *args, **kwargs) + + def info(self, msg, *args, **kwargs): + """ Print information to the log. This is the same as console(msg, logging.INFO) """ + self._task.log.log(msg=msg, level=logging.INFO, *args, **kwargs) + + def warn(self, msg, *args, **kwargs): + """ Print a warning to the log. This is the same as console(msg, logging.WARNING) """ + self._task.log.log(msg=msg, level=logging.WARNING, *args, **kwargs) + + warning = warn + + def error(self, msg, *args, **kwargs): + """ Print an error to the log. This is the same as console(msg, logging.ERROR) """ + self._task.log.log(msg=msg, level=logging.ERROR, *args, **kwargs) + + def fatal(self, msg, *args, **kwargs): + """ Print a fatal error to the log. This is the same as console(msg, logging.FATAL) """ + self._task.log.log(msg=msg, level=logging.FATAL, *args, **kwargs) + + def critical(self, msg, *args, **kwargs): + """ Print a critical error to the log. This is the same as console(msg, logging.CRITICAL) """ + self._task.log.log(msg=msg, level=logging.CRITICAL, *args, **kwargs) + + def report_scalar(self, title, series, value, iteration): + """ + Report a scalar value + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param value: Reported value + :type value: float + :param iteration: Iteration number + :type value: int + """ + + # if task was not started, we have to start it + self._start_task_if_needed() + + return self._task.reporter.report_scalar(title=title, series=series, value=float(value), iter=iteration) + + def report_vector(self, title, series, values, iteration, labels=None, xlabels=None): + """ + Report a histogram plot + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param values: Reported values (or numpy array) + :type values: [float] + :param iteration: Iteration number + :type iteration: int + :param labels: optional label per entry in the vector (for histogram) + """ + + if not isinstance(values, np.ndarray): + values = np.array(values) + + # if task was not started, we have to start it + self._start_task_if_needed() + + return self._task.reporter.report_histogram( + title=title, + series=series, + histogram=values, + iter=iteration, + labels=labels, + xlabels=xlabels, + ) + + def report_line_plot(self, title, series, iteration, xaxis, yaxis, mode='lines', reverse_xaxis=False, comment=None): + """ + Report a (possibly multiple) line plot. + + :param title: Title (AKA metric) + :type title: str + :param series: All the series' data, one for each line in the plot. + :type series: An iterable of LineSeriesInfo. 
+ :param iteration: Iteration number + :type iteration: int + :param xaxis: optional x-axis title + :param yaxis: optional y-axis title + :param mode: scatter plot with 'lines'/'markers'/'lines+markers' + :type mode: str + :param reverse_xaxis: If true X axis will be displayed from high to low (reversed) + :type reverse_xaxis: bool + :param comment: comment underneath the title + :type comment: str + """ + + series = [self.SeriesInfo(**s) if isinstance(s, dict) else s for s in series] + + # if task was not started, we have to start it + self._start_task_if_needed() + + return self._task.reporter.report_line_plot( + title=title, + series=series, + iter=iteration, + xtitle=xaxis, + ytitle=yaxis, + mode=mode, + reverse_xaxis=reverse_xaxis, + comment=comment, + ) + + def report_scatter2d(self, title, series, scatter, iteration, xaxis=None, yaxis=None, labels=None, + mode='lines', comment=None): + """ + Report a 2d scatter graph (with lines) + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param scatter: A scattered data: list of (pairs of x,y) (or numpy array) + :type scatter: ndarray or list + :param iteration: Iteration number + :type iteration: int + :param xaxis: optional x-axis title + :param yaxis: optional y-axis title + :param labels: label (text) per point in the scatter (in the same order) + :param mode: scatter plot with 'lines'/'markers'/'lines+markers' + :type mode: str + :param comment: comment underneath the title + :type comment: str + """ + + if not isinstance(scatter, np.ndarray): + if not isinstance(scatter, list): + scatter = list(scatter) + scatter = np.array(scatter) + + # if task was not started, we have to start it + self._start_task_if_needed() + + return self._task.reporter.report_2d_scatter( + title=title, + series=series, + data=scatter.astype(np.float32), + iter=iteration, + mode=mode, + xtitle=xaxis, + ytitle=yaxis, + labels=labels, + comment=comment, + ) + + def report_scatter3d(self, title, series, scatter, iteration, labels=None, mode='markers', + fill=False, comment=None): + """ + Report a 3d scatter graph (with markers) + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param scatter: A scattered data: list of (pairs of x,y,z) (or numpy array) or list of series [[(x1,y1,z1)...]] + :type scatter: ndarray or list + :param iteration: Iteration number + :type iteration: int + :param labels: label (text) per point in the scatter (in the same order) + :param mode: scatter plot with 'lines'/'markers'/'lines+markers' + :param fill: fill area under the curve + :param comment: comment underneath the title + """ + # check if multiple series + multi_series = ( + isinstance(scatter, list) + and ( + isinstance(scatter[0], np.ndarray) + or ( + scatter[0] + and isinstance(scatter[0], list) + and isinstance(scatter[0][0], list) + ) + ) + ) + + if not multi_series: + if not isinstance(scatter, np.ndarray): + if not isinstance(scatter, list): + scatter = list(scatter) + scatter = np.array(scatter) + try: + scatter = scatter.astype(np.float32) + except ValueError: + pass + + # if task was not started, we have to start it + self._start_task_if_needed() + + return self._task.reporter.report_3d_scatter( + title=title, + series=series, + data=scatter, + iter=iteration, + labels=labels, + mode=mode, + fill=fill, + comment=comment, + ) + + def report_confusion_matrix(self, title, series, matrix, iteration, xlabels=None, ylabels=None, comment=None): + 
""" + Report a heat-map matrix + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param matrix: A heat-map matrix (example: confusion matrix) + :type matrix: ndarray + :param iteration: Iteration number + :type iteration: int + :param xlabels: optional label per column of the matrix + :param ylabels: optional label per row of the matrix + :param comment: comment underneath the title + """ + + if not isinstance(matrix, np.ndarray): + matrix = np.array(matrix) + + # if task was not started, we have to start it + self._start_task_if_needed() + + return self._task.reporter.report_value_matrix( + title=title, + series=series, + data=matrix.astype(np.float32), + iter=iteration, + xlabels=xlabels, + ylabels=ylabels, + comment=comment, + ) + + def report_matrix(self, title, series, matrix, iteration, xlabels=None, ylabels=None): + """ + Same as report_confusion_matrix + Report a heat-map matrix + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param matrix: A heat-map matrix (example: confusion matrix) + :type matrix: ndarray + :param iteration: Iteration number + :type iteration: int + :param xlabels: optional label per column of the matrix + :param ylabels: optional label per row of the matrix + """ + return self.report_confusion_matrix(title, series, matrix, iteration, xlabels=xlabels, ylabels=ylabels) + + def report_surface(self, title, series, matrix, iteration, xlabels=None, ylabels=None, + xtitle=None, ytitle=None, camera=None, comment=None): + """ + Report a 3d surface (same data as heat-map matrix, only presented differently) + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param matrix: A heat-map matrix (example: confusion matrix) + :type matrix: ndarray + :param iteration: Iteration number + :type iteration: int + :param xlabels: optional label per column of the matrix + :param ylabels: optional label per row of the matrix + :param xtitle: optional x-axis title + :param ytitle: optional y-axis title + :param camera: X,Y,Z camera position. def: (1,1,1) + :param comment: comment underneath the title + """ + + if not isinstance(matrix, np.ndarray): + matrix = np.array(matrix) + + # if task was not started, we have to start it + self._start_task_if_needed() + + return self._task.reporter.report_value_surface( + title=title, + series=series, + data=matrix.astype(np.float32), + iter=iteration, + xlabels=xlabels, + ylabels=ylabels, + xtitle=xtitle, + ytitle=ytitle, + camera=camera, + comment=comment, + ) + + @_safe_names + def report_image(self, title, series, src, iteration): + """ + Report an image, and register the 'src' as url content. + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param src: Image source URI. This URI will be used by the webapp and workers when trying to obtain the image \ + for presentation of processing. Currently only http(s), file and s3 schemes are supported. + :type src: str + :param iteration: Iteration number + :type iteration: int + """ + + # if task was not started, we have to start it + self._start_task_if_needed() + + self._task.reporter.report_image( + title=title, + series=series, + src=src, + iter=iteration, + ) + + @_safe_names + def report_image_and_upload(self, title, series, iteration, path=None, matrix=None, max_image_history=None): + """ + Report an image and upload its contents. 
+ + Image is uploaded to a preconfigured bucket (see setup_upload()) with a key (filename) + describing the task ID, title, series and iteration. + + :param title: Title (AKA metric) + :type title: str + :param series: Series (AKA variant) + :type series: str + :param iteration: Iteration number + :type iteration: int + :param path: A path to an image file. Required unless matrix is provided. + :type path: str + :param matrix: A 3D numpy.ndarray object containing image data (RGB). Required unless path is provided. + :type matrix: numpy.ndarray + :param max_image_history: maximum number of images to store per metric/variant combination \ + use a negative value for unlimited. Default is set in the global configuration (default=5) + :type max_image_history: int + """ + + # if task was not started, we have to start it + self._start_task_if_needed() + upload_uri = self._default_upload_destination or self._task._get_default_report_storage_uri() + if not upload_uri: + upload_uri = Path(get_cache_dir()) / 'debug_images' + upload_uri.mkdir(parents=True, exist_ok=True) + # Verify that we can upload to this destination + upload_uri = str(upload_uri) + storage = StorageHelper.get(upload_uri) + upload_uri = storage.verify_upload(folder_uri=upload_uri) + + self._task.reporter.report_image_and_upload( + title=title, + series=series, + path=path, + matrix=matrix, + iter=iteration, + upload_uri=upload_uri, + max_image_history=max_image_history, + ) + + def set_default_upload_destination(self, uri): + """ + Set the uri to upload all the debug images to. + + Images are uploaded separately to the destination storage (e.g. s3, gc, file) and then + a link to the uploaded image is sent in the report. + Notice: credentials for the upload destination will be pulled from the + global configuration file (i.e. ~/trains.conf) + + :param uri: example: 's3://bucket/directory/' or 'file:///tmp/debug/' + :return: True if destination scheme is supported (i.e. s3:// file:// gc:// etc...) + """ + + # Create the storage helper + storage = StorageHelper.get(uri) + + # Verify that we can upload to this destination + uri = storage.verify_upload(folder_uri=uri) + + self._default_upload_destination = uri + + def flush(self): + """ + Flush cached reports and console outputs to backend. + + :return: True if successful + """ + self._flush_stdout_handler() + if self._task: + return self._task.flush() + return False + + def get_flush_period(self): + if self._flusher: + return self._flusher.period + return None + + def set_flush_period(self, period): + """ + Set the period of the logger flush. + + :param period: The period to flush the logger in seconds. If None or 0, + there will be no periodic flush.
+ """ + if self._task.is_main_task() and DevWorker.report_stdout and DevWorker.report_period and not running_remotely(): + period = min(period or DevWorker.report_period, DevWorker.report_period) + + if not period: + if self._flusher: + self._flusher.exit() + self._flusher = None + elif self._flusher: + self._flusher.set_period(period) + else: + self._flusher = _Flusher(self, period) + self._flusher.start() + + def _start_task_if_needed(self): + if self._task._status == TaskStatusEnum.created: + self._task.mark_started() + + self._task._dev_mode_task_start() + + def _flush_stdout_handler(self): + if self._task_handler and DevWorker.report_stdout: + self._task_handler.flush() + + +def stdout__patched__write__(*args, **kwargs): + if Logger._stdout_proxy: + return Logger._stdout_proxy.write(*args, **kwargs) + return sys.stdout._original_write(*args, **kwargs) + + +def stderr__patched__write__(*args, **kwargs): + if Logger._stderr_proxy: + return Logger._stderr_proxy.write(*args, **kwargs) + return sys.stderr._original_write(*args, **kwargs) + + +class PrintPatchLogger(object): + """ + Allowed patching a stream into the logger. + Used for capturing and logging stdin and stderr when running in development mode pseudo worker. + """ + patched = False + lock = threading.Lock() + recursion_protect_lock = threading.RLock() + + def __init__(self, stream, logger=None, level=logging.INFO): + PrintPatchLogger.patched = True + self._terminal = stream + self._log = logger + self._log_level = level + self._cur_line = '' + + def write(self, message): + # make sure that we do not end up in infinite loop (i.e. log.console ends up calling us) + if self._log and not PrintPatchLogger.recursion_protect_lock._is_owned(): + try: + self.lock.acquire() + with PrintPatchLogger.recursion_protect_lock: + if hasattr(self._terminal, '_original_write'): + self._terminal._original_write(message) + else: + self._terminal.write(message) + + do_flush = '\n' in message + do_cr = '\r' in message + self._cur_line += message + if (not do_flush and not do_cr) or not message: + return + last_lf = self._cur_line.rindex('\n' if do_flush else '\r') + next_line = self._cur_line[last_lf + 1:] + cur_line = self._cur_line[:last_lf + 1].rstrip() + self._cur_line = next_line + finally: + self.lock.release() + + if cur_line: + with PrintPatchLogger.recursion_protect_lock: + self._log.console(cur_line, level=self._log_level, omit_console=True) + else: + if hasattr(self._terminal, '_original_write'): + self._terminal._original_write(message) + else: + self._terminal.write(message) + + def connect(self, logger): + self._log = logger + + def __getattr__(self, attr): + if attr in ['_log', '_terminal', '_log_level', '_cur_line']: + return self.__dict__.get(attr) + return getattr(self._terminal, attr) + + def __setattr__(self, key, value): + if key in ['_log', '_terminal', '_log_level', '_cur_line']: + self.__dict__[key] = value + else: + return setattr(self._terminal, key, value) + + +class _Flusher(threading.Thread): + def __init__(self, logger, period, **kwargs): + super(_Flusher, self).__init__(**kwargs) + self.daemon = True + + self._period = period + self._logger = logger + self._exit_event = threading.Event() + + @property + def period(self): + return self._period + + def run(self): + self._logger.flush() + # store original wait period + while True: + period = self._period + while not self._exit_event.wait(period or 1.0): + self._logger.flush() + # check if period is negative or None we should exit + if self._period is None or self._period < 
0: + break + # check if period was changed, we should restart + self._exit_event.clear() + + def exit(self): + self._period = None + self._exit_event.set() + + def set_period(self, period): + self._period = period + # make sure we exit the previous wait + self._exit_event.set() diff --git a/trains/model.py b/trains/model.py new file mode 100644 index 00000000..4b32b9ae --- /dev/null +++ b/trains/model.py @@ -0,0 +1,1006 @@ +import abc +import os +import re +import tarfile +import zipfile +from tempfile import mkdtemp, mkstemp + +import pyparsing +import six +from .backend_api.services import models +from pathlib2 import Path +from pyhocon import ConfigFactory, HOCONConverter + +from .backend_interface.util import validate_dict, get_single_result, mutually_exclusive +from .debugging.log import get_logger +from .storage import StorageHelper +from .utilities.enum import Options +from .backend_interface import Task as _Task +from .backend_interface.model import Model as _Model, DummyModel as _DummyModel +from .config import running_remotely, get_cache_dir + +ARCHIVED_TAG = "archived" + + +class Framework(Options): + """ + Optional frameworks for output model + """ + tensorflow = 'TensorFlow' + tensorflowjs = 'TensorFlow_js' + tensorflowlite = 'TensorFlow_Lite' + pytorch = 'PyTorch' + caffe = 'Caffe' + caffe2 = 'Caffe2' + onnx = 'ONNX' + keras = 'Keras' + mknet = 'MXNet' + cntk = 'CNTK' + torch = 'Torch' + darknet = 'Darknet' + paddlepaddle = 'PaddlePaddle' + scikitlearn = 'ScikitLearn' + + __file_extensions_mapping = { + '.pb': (tensorflow, tensorflowjs, onnx, ), + '.meta': (tensorflow, ), + '.pbtxt': (tensorflow, onnx, ), + '.zip': (tensorflow, ), + '.tgz': (tensorflow, ), + '.tar.gz': (tensorflow, ), + 'model.json': (tensorflowjs, ), + '.tflite': (tensorflowlite, ), + '.pth': (pytorch, ), + '.caffemodel': (caffe, ), + '.prototxt': (caffe, ), + 'predict_net.pb': (caffe2, ), + 'predict_net.pbtxt': (caffe2, ), + '.onnx': (onnx, ), + '.h5': (keras, ), + '.hdf5': (keras, ), + '.keras': (keras, ), + '.model': (mknet, cntk, ), + '-symbol.json': (mknet, ), + '.cntk': (cntk, ), + '.t7': (torch, ), + '.cfg': (darknet, ), + '__model__': (paddlepaddle, ), + '.pkl': (scikitlearn, keras, ), + } + + @classmethod + def _get_file_ext(cls, framework, filename): + mapping = cls.__file_extensions_mapping + filename = filename.lower() + + def find_framework_by_ext(framework_selector): + for ext, frameworks in mapping.items(): + if frameworks and filename.endswith(ext): + fw = framework_selector(frameworks) + if fw: + return (fw, ext) + + # If no framework, try finding first framework matching the extension, otherwise (or if no match) try matching + # the given extension to the given framework. If no match return an empty extension + return ( + (not framework and find_framework_by_ext(lambda frameworks_: frameworks_[0])) + or find_framework_by_ext(lambda frameworks_: framework if framework in frameworks_ else None) + or (framework, filename.split('.')[-1] if '.' 
in filename else '') + ) + + +@six.add_metaclass(abc.ABCMeta) +class BaseModel(object): + _package_tag = "package" + + @property + def id(self): + """ + return the id of the model (string) + + :return: model id (string) + """ + return self._get_model_data().id + + @property + def name(self): + """ + return the name of the model (string) + + :return: model name (string) + """ + return self._get_model_data().name + + @name.setter + def name(self, value): + """ + Update the model name + + :param value: model name (string) + """ + self._get_base_model().update(name=value) + + @property + def comment(self): + """ + return comment/description of the model (string) + + :return: model description (string) + """ + return self._get_model_data().comment + + @comment.setter + def comment(self, value): + """ + Update the model comment/description of the model (string) + + :param value: model comment/description (string) + """ + self._get_base_model().update(comment=value) + + @property + def tags(self): + """ + Return the list of tags the model has + + :return: list of strings (tags) + """ + return self._get_model_data().tags + + @tags.setter + def tags(self, value): + """ + Update the model list of tags (list of strings) + + :param value: list of strings as tags + """ + self._get_base_model().update(tags=value) + + @property + def config_text(self): + """ + returns a string representing the model configuration (from prototxt to ini file or python code to evaluate) + + :return: string + """ + return _Model._unwrap_design(self._get_model_data().design) + + @property + def config_dict(self): + """ + returns a configuration dictionary parsed from the design text, + usually representing the model configuration (from prototxt to ini file or python code to evaluate) + + :return: Dictionary + """ + return self._text_to_config_dict(self.config_text) + + @property + def labels(self): + """ + Return the labels enumerator {str(label): integer(id)} as saved in the model object + + :return: labels_dict, dictionary with labels (text) keys and values as integers + """ + return self._get_model_data().labels + + @property + def task(self): + return self._task + + @property + def published(self): + return self._get_base_model().locked + + @property + def framework(self): + return self._get_model_data().framework + + def __init__(self, task=None): + super(BaseModel, self).__init__() + self._log = get_logger() + self._task = None + self._set_task(task) + + def get_weights(self): + """ + Download the base model and returns a string of locally stored filename + + :return: string to locally stored file + """ + # download model (synchronously) and return local file + return self._get_base_model().download_model_weights() + + def get_weights_package(self, return_path=False): + """ + Download the base model package, extract the files and return list of locally stored filenames + + :param return_path: if True the model weights are downloaded into a + temporary directory and the directory path is returned, instead of list of files + :return: string to locally stored file + """ + # check if model was packaged + if self._package_tag not in self._get_model_data().tags: + raise ValueError('Model is not packaged') + + # download packaged model + packed_file = self.get_weights() + + # unpack + target_folder = mkdtemp(prefix='model_package_') + if not target_folder: + raise ValueError('cannot create temporary directory for packed weight files') + + for func in (zipfile.ZipFile, tarfile.open): + try: + obj = func(packed_file) + 
obj.extractall(path=target_folder) + break + except (zipfile.BadZipfile, tarfile.ReadError): + pass + else: + raise ValueError('cannot extract files from packaged model at %s', packed_file) + + if return_path: + return target_folder + + target_files = list(Path(target_folder).glob('*')) + return target_files + + def publish(self): + """ + Set the model to 'published' and set it for public use. + + If the model is already published, this method is a no-op. + """ + + if not self.published: + self._get_base_model().publish() + + def _running_remotely(self): + return bool(running_remotely() and self._task is not None) + + def _set_task(self, value): + if value is not None and not isinstance(value, _Task): + raise ValueError('task argument must be of Task type') + self._task = value + + @abc.abstractmethod + def _get_model_data(self): + pass + + @abc.abstractmethod + def _get_base_model(self): + pass + + def _set_package_tag(self): + if self._package_tag not in self.tags: + self.tags.append(self._package_tag) + self._get_base_model().update(tags=self.tags) + + @staticmethod + def _config_dict_to_text(config): + if not isinstance(config, dict): + raise ValueError("Model configuration only supports dictionary objects") + try: + # hack, pyhocon is not very good with dict conversion so we pass through json + try: + import json + text = json.dumps(config) + text = HOCONConverter.convert(ConfigFactory.parse_string(text), 'hocon') + except Exception: + # fallback pyhocon + text = HOCONConverter.convert(ConfigFactory.from_dict(config), 'hocon') + except Exception: + raise ValueError("Could not serialize configuration dictionary:\n", config) + return text + + @staticmethod + def _text_to_config_dict(text): + if not isinstance(text, six.string_types): + raise ValueError("Model configuration parsing only supports string") + try: + return ConfigFactory.parse_string(text).as_plain_ordered_dict() + except pyparsing.ParseBaseException as ex: + pos = "at char {}, line:{}, col:{}".format(ex.loc, ex.lineno, ex.column) + six.raise_from(ValueError("Could not parse configuration text ({}):\n{}".format(pos, text)), None) + except Exception: + six.raise_from(ValueError("Could not parse configuration text:\n{}".format(text)), None) + + @staticmethod + def _resolve_config(config_text=None, config_dict=None): + mutually_exclusive(config_text=config_text, config_dict=config_dict, _require_at_least_one=False) + if config_dict: + return InputModel._config_dict_to_text(config_dict) + + return config_text + + +class InputModel(BaseModel): + """ + Load an existing model in the system, search by model id. + The Model will be read-only and can be used to pre initialize a network + We can connect the model to a task as input model, then when running remotely override it with the UI. + """ + + _EMPTY_MODEL_ID = _Model._EMPTY_MODEL_ID + + @classmethod + def import_model( + cls, + weights_url, + config_text=None, + config_dict=None, + label_enumeration=None, + name=None, + tags=None, + comment=None, + logger=None, + is_package=False, + create_as_published=False, + framework=None, + ): + """ + Create a model from pre-existing model file (link must be valid), and model configuration. + + If the url to the weights file already exists, the import process will stop with a warning + and automatically it will try to import the model that was found. + The Model will be read-only and can be used to pre initialize a network + We can connect the model to a task as input model, then when running remotely override it with the UI. 
+ Load model based on id, returned object is read-only and can be connected to a task + That is, we can override the input model when running remotely + + :param weights_url: valid url for the weights file (string). + examples: "https://domain.com/file.bin" or "s3://bucket/file.bin" or "file:///home/user/file.bin". + NOTE: if a model with the exact same URL exists, it will be used, and all other arguments will be ignored. + :param config_text: model configuration (unconstrained text string). usually the content of + configuration file. If `config_text` is not None, `config_dict` must not be provided. + :param config_dict: model configuration parameters (dict). + If `config_dict` is not None, `config_text` must not be provided. + :param label_enumeration: dictionary of string to integer, enumerating the model output to labels + example: {'background': 0 , 'person': 1} + :param name: optional, name for the newly imported model + :param tags: optional, list of strings as tags + :param comment: optional, string description for the model + :param logger: The logger to use. If None, use the default logger + :param is_package: Boolean. Indicates that the imported weights file is a package. + If True, and a new model was created, a package tag will be added. + :param create_as_published: Boolean. If True, and a new model is created, it will be published. + :param framework: optional, string name of the framework of the model or Framework + """ + config_text = cls._resolve_config(config_text=config_text, config_dict=config_dict) + weights_url = StorageHelper.conform_url(weights_url) + result = _Model._get_default_session().send(models.GetAllRequest( + uri=[weights_url], + only_fields=["id", "name"], + tags=["-" + ARCHIVED_TAG] + )) + + if result.response.models: + if not logger: + logger = get_logger() + + logger.debug('A model with uri "{}" already exists. Selecting it'.format(weights_url)) + + model = get_single_result( + entity='model', + query=weights_url, + results=result.response.models, + log=logger, + raise_on_error=False, + ) + + logger.info("Selected model id: {}".format(model.id)) + + return InputModel(model_id=model.id) + + base_model = _Model( + upload_storage_uri=None, + cache_dir=get_cache_dir(), + ) + + from .task import Task + task = Task.current_task() + if task: + comment = 'Imported by task id: {}'.format(task.id) + ('\n'+comment if comment else '') + project_id = task.project + task_id = task.id + else: + project_id = None + task_id = None + + if not framework: + framework, file_ext = Framework._get_file_ext( + framework=framework, + filename=weights_url + ) + + base_model.update( + design=config_text, + labels=label_enumeration, + name=name, + comment=comment, + tags=tags, + uri=weights_url, + framework=framework, + project_id=project_id, + task_id=task_id, + ) + + this_model = InputModel(model_id=base_model.id) + this_model._base_model = base_model + + if is_package: + this_model._set_package_tag() + + if create_as_published: + this_model.publish() + + return this_model + + @classmethod + def empty( + cls, + config_text=None, + config_dict=None, + label_enumeration=None, + ): + """ + Create an empty model, so that later we can execute the task in remote and + replace the empty model with pre-trained model file + + :param config_text: model configuration (unconstrained text string). usually the content of a config_dict file. + If `config_text` is not None, `config_dict` must not be provided. + :param config_dict: model configuration parameters (dict). 
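A minimal sketch of the import flow described above; the URL, configuration, labels, and names are illustrative placeholders, and Task is assumed to come from the trains.task module added in this patch:

from trains.task import Task
from trains.model import InputModel

input_model = InputModel.import_model(
    weights_url='s3://my-bucket/pretrained/resnet50.pth',     # placeholder location
    config_dict={'backbone': 'resnet50', 'num_classes': 2},
    label_enumeration={'background': 0, 'person': 1},
    name='pretrained resnet50',
    comment='baseline weights for fine-tuning',
)

# if a task is running, connect the model as its input so a remote run can
# override it from the UI
task = Task.current_task()
if task:
    input_model.connect(task)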
+ If `config_dict` is not None, `config_text` must not be provided. + :param label_enumeration: dictionary of string to integer, enumerating the model output to labels + example: {'background': 0 , 'person': 1} + """ + design = cls._resolve_config(config_text=config_text, config_dict=config_dict) + + this_model = InputModel(model_id=cls._EMPTY_MODEL_ID) + this_model._base_model = m = _Model( + cache_dir=None, + upload_storage_uri=None, + model_id=cls._EMPTY_MODEL_ID, + ) + m._data.design = _Model._wrap_design(design) + m._data.labels = label_enumeration + return this_model + + def __init__(self, model_id): + """ + Load model based on id, returned object is read-only and can be connected to a task + + Notice, we can override the input model when running remotely + + :param model_id: id (string) + """ + super(InputModel, self).__init__() + self._base_model_id = model_id + self._base_model = None + + @property + def id(self): + return self._base_model_id + + def connect(self, task): + """ + Connect current model with a specific task, only supported for preexisting models, + + i.e. not supported on objects created with create_and_connect() + When running in debug mode (i.e. locally), the task is updated with the model object + (i.e. task input model is the load_model_id) + When running remotely (i.e. from a daemon) the model is being updated from the task + Notice! when running remotely the load_model_id is ignored and loaded from the task object + regardless of the code + + :param task: Task object + """ + self._set_task(task) + + if running_remotely() and task.input_model and task.is_main_task(): + self._base_model = task.input_model + self._base_model_id = task.input_model.id + else: + # we should set the task input model to point to us + model = self._get_base_model() + # try to store the input model id, if it is not empty + if model.id != self._EMPTY_MODEL_ID: + task.set_input_model(model_id=model.id) + # only copy the model design if the task has no design to begin with + if not self._task.get_model_config_text(): + task.set_model_config(config_text=model.model_design) + if not self._task.get_labels_enumeration(): + task.set_model_label_enumeration(model.data.labels) + + # If there was an output model connected, it may need to be updated by + # the newly connected input model + self.task._reconnect_output_model() + + def _get_base_model(self): + if self._base_model: + return self._base_model + + if not self._base_model_id: + # this shouldn't actually happen + raise Exception('Missing model ID, cannot create an empty model') + self._base_model = _Model( + upload_storage_uri=None, + cache_dir=get_cache_dir(), + model_id=self._base_model_id, + ) + return self._base_model + + def _get_model_data(self): + return self._get_base_model().data + + +class OutputModel(BaseModel): + """ + Create an output model for a task to store the training results in. + + By definition the Model is always connected to a task, and is automatically registered as its output model. + The common use case is reusing the model object, and overriding the weights every stored snapshot. + A user can create multiple output models for a task, think a snapshot after a validation test has a new high-score. + The Model will be read-write and if config/label-enumeration are None, + their values will be initialized from the task input model. 
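And a short sketch of the placeholder flow for remote override (configuration and labels are illustrative):

from trains.model import InputModel

# create an empty placeholder locally; a remote run can replace it with a real
# pre-trained model selected in the UI
placeholder = InputModel.empty(
    config_dict={'input_size': 224},
    label_enumeration={'background': 0, 'cat': 1},
)
# placeholder.connect(task)   # when running remotely, the task's input model wins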
+ """ + + @property + def published(self): + if not self.id: + return False + return self._get_base_model().locked + + @property + def config_text(self): + """ + returns a string representing the model configuration (from prototxt to ini file or python code to evaluate) + + :return: string + """ + return _Model._unwrap_design(self._get_model_data().design) + + @config_text.setter + def config_text(self, value): + """ + Update the model configuration, store a blob of text for custom usage + """ + self.update_design(config_text=value) + + @property + def config_dict(self): + """ + returns a configuration dictionary parsed from the config_text text, + usually representing the model configuration (from prototxt to ini file or python code to evaluate) + + :return: Dictionary + """ + return self._text_to_config_dict(self.config_text) + + @config_dict.setter + def config_dict(self, value): + """ + Update the model configuration: model configuration parameters (dict). + """ + self.update_design(config_dict=value) + + @property + def labels(self): + """ + Return the labels enumerator {str(label): integer(id)} as saved in the model object + + :return: labels_dict, dictionary with labels (text) keys and values as integers + """ + return self._get_model_data().labels + + @labels.setter + def labels(self, value): + """ + update the labels enumerator {str(label): integer(id)} as saved in the model object + """ + self.update_labels(labels=value) + + @property + def upload_storage_uri(self): + return self._get_base_model().upload_storage_uri + + def __init__( + self, + task, + config_text=None, + config_dict=None, + label_enumeration=None, + name=None, + tags=None, + comment=None, + framework=None, + ): + """ + Create a new model and immediately connect it to a task. + + We do not allow for Model creation without a task, so we always keep track on how we created the models + In remote execution, Model parameters can be overridden by the Task (such as model configuration & label enumerator) + + :param task: Task object + :type task: Task + :param config_text: model configuration (unconstrained text string). usually the content of a config_dict file. + If `config_text` is not None, `config_dict` must not be provided. + :param config_dict: model configuration parameters (dict). + If `config_dict` is not None, `config_text` must not be provided. + :param label_enumeration: dictionary of string to integer, enumerating the model output to labels + example: {'background': 0 , 'person': 1} + :type label_enumeration: dict[str: int] or None + :param name: optional, name for the newly created model + :param tags: optional, list of strings as tags + :param comment: optional, string description for the model + :param framework: optional, string name of the framework of the model or Framework + """ + super(OutputModel, self).__init__(task=task) + + config_text = self._resolve_config(config_text=config_text, config_dict=config_dict) + + self._model_local_filename = None + self._base_model = None + self._floating_data = _DummyModel( + design=_Model._wrap_design(config_text), + labels=label_enumeration or task.get_labels_enumeration(), + name=name, + tags=tags, + comment='Created by task id: {}'.format(task.id) + ('\n' + comment if comment else ''), + framework=framework, + upload_storage_uri=task.output_uri, + ) + self.connect(task) + + def connect(self, task): + """ + Connect current model with a specific task, only supported for preexisting models, + + i.e. 
not supported on objects created with create_and_connect() + When running in debug mode (i.e. locally), the task is updated with the model object + (i.e. task input model is the load_model_id) + When running remotely (i.e. from a daemon) the model is being updated from the task + Notice! when running remotely the load_model_id is ignored and loaded from the task object + regardless of the code + + :param task: Task object + """ + if self._task != task: + raise ValueError('Can only connect preexisting model to task, but this is a fresh model') + + if running_remotely() and task.is_main_task(): + self._floating_data.design = _Model._wrap_design(self._task.get_model_config_text()) + self._floating_data.labels = self._task.get_labels_enumeration() + elif self._floating_data is not None: + # we copy configuration / labels if they exist, obviously someone wants them as the output base model + if _Model._unwrap_design(self._floating_data.design): + task.set_model_config(config_text=self._floating_data.design) + else: + self._floating_data.design = _Model._wrap_design(self._task.get_model_config_text()) + + if self._floating_data.labels: + task.set_model_label_enumeration(self._floating_data.labels) + else: + self._floating_data.labels = self._task.get_labels_enumeration() + + self.task._save_output_model(self) + + def set_upload_destination(self, uri): + """ + Set the uri to upload all the model weight files to. + + Files are uploaded separately to the destination storage (e.g. s3,gc,file) and then + a link to the uploaded model is stored in the model object + Notice: credentials for the upload destination will be pooled from the + global configuration file (i.e. ~/trains.conf) + + :param uri: upload destination (string). example: 's3://bucket/directory/' or 'file:///tmp/debug/' + :return: True if destination scheme is supported (i.e. s3:// file:// gc:// etc...) + """ + if not uri: + return + + # Test if we can update the model. + self._validate_update() + + # Create the storage helper + storage = StorageHelper.get(uri) + + # Verify that we can upload to this destination + try: + uri = storage.verify_upload(folder_uri=uri) + except Exception: + raise ValueError("Could not set destination uri to: %s [Check write permissions]" % uri) + + # store default uri + self._get_base_model().upload_storage_uri = uri + + def update_weights(self, weights_filename=None, upload_uri=None, target_filename=None, + auto_delete_file=True, register_uri=None, iteration=None, update_comment=True): + """ + Update the model weights from a locally stored model filename. + + Uploading the model is a background process, the call returns immediately. 
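Continuing that sketch, the destination below is a placeholder bucket; credentials are expected to come from the global configuration file (~/trains.conf):

# all subsequent weight uploads for output_model will go to this location
output_model.set_upload_destination('s3://my-bucket/experiments/models/')
# a local folder works as well, e.g. for debugging:
# output_model.set_upload_destination('file:///tmp/debug/')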
+ + :param weights_filename: locally stored filename to be uploaded as is + :param upload_uri: destination uri for model weights upload (default: previously used uri) + :param target_filename: the newly created filename in the destination uri location (default: weights_filename) + :param auto_delete_file: delete temporary file after uploading + :param register_uri: register an already uploaded weights file (uri must be valid) + :param update_comment: if True, model comment will be updated with local weights file location (provenance) + :return: uploaded uri + """ + + def delete_previous_weights_file(filename=weights_filename): + try: + if filename: + os.remove(filename) + except OSError: + self._log.debug('Failed removing temporary file %s' % filename) + + # test if we can update the model + if self.id and self.published: + raise ValueError('Model is published and cannot be changed') + + if (not weights_filename and not register_uri) or (weights_filename and register_uri): + raise ValueError('Model update must have either local weights file to upload, ' + 'or pre-uploaded register_uri, never both') + + # only upload if we are connected to a task + if not self._task: + raise Exception('Missing a task for this model') + + if weights_filename is not None: + # make sure we delete the previous file, if it exists + if self._model_local_filename != weights_filename: + delete_previous_weights_file(self._model_local_filename) + # store temp filename for deletion next time, if needed + if auto_delete_file: + self._model_local_filename = weights_filename + + # make sure the created model is updated: + model = self._get_force_base_model() + if not model: + raise ValueError('Failed creating internal output model') + + # select the correct file extension based on the framework, or update the framework based on the file extension + framework, file_ext = Framework._get_file_ext( + framework=self._get_model_data().framework, + filename=weights_filename or register_uri + ) + + if weights_filename: + target_filename = target_filename or Path(weights_filename).name + if not target_filename.lower().endswith(file_ext): + target_filename += file_ext + + # set target uri for upload (if specified) + if upload_uri: + self.set_upload_destination(upload_uri) + + # let us know the iteration number, we put it in the comment section for now. 
+
+        if update_comment:
+            comment = self.comment or ''
+            iteration_msg = 'snapshot {} stored'.format(weights_filename or register_uri)
+            if not comment.startswith('\n'):
+                comment = '\n' + comment
+            comment = iteration_msg + comment
+        else:
+            comment = None
+
+        # if we have no output destination, just register the local model file
+        if weights_filename and not self.upload_storage_uri and not self._task.storage_uri:
+            register_uri = weights_filename
+            weights_filename = None
+            auto_delete_file = False
+            self._log.info('No output storage destination defined, registering local model %s' % register_uri)
+
+        # start the upload
+        if weights_filename:
+            if not model.upload_storage_uri:
+                self.set_upload_destination(self.upload_storage_uri or self._task.storage_uri)
+
+            output_uri = model.update_and_upload(
+                model_file=weights_filename,
+                task_id=self._task.id,
+                async_enable=True,
+                target_filename=target_filename,
+                framework=self.framework or framework,
+                comment=comment,
+                cb=delete_previous_weights_file if auto_delete_file else None,
+                iteration=iteration or self._task.data.last_iteration,
+            )
+        elif register_uri:
+            register_uri = StorageHelper.conform_url(register_uri)
+            output_uri = model.update(uri=register_uri, task_id=self._task.id, framework=framework, comment=comment)
+        else:
+            output_uri = None
+
+        # make sure that if we are in dev mode we report that we are training (not debugging)
+        self._task._output_model_updated()
+
+        return output_uri
+
+    def update_weights_package(self, weights_filenames=None, weights_path=None, upload_uri=None,
+                               target_filename=None, auto_delete_file=True, iteration=None):
+        """
+        Update the model weights from locally stored model files (or a directory containing multiple files).
+
+        Uploading the model is a background process; the call returns immediately.
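A sketch of the update_weights() call defined above, continuing the earlier output_model example; the local path and iteration number are illustrative:

uri = output_model.update_weights(
    weights_filename='/tmp/snapshot_epoch_3.pt',   # placeholder local snapshot
    iteration=3,
)
# note: with the default auto_delete_file=True the local file is removed once uploaded

# alternatively, register a file that was already uploaded elsewhere:
# output_model.update_weights(register_uri='s3://my-bucket/experiments/models/snapshot_epoch_3.pt')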
+ + :param weights_filenames: list of locally stored filenames (list of strings) + :type weights_filenames: list + :param weights_path: directory path to package (all the files in the directory will be uploaded) + :type weights_path: str + :param upload_uri: destination uri for model weights upload (default: previously used uri) + :param target_filename: the newly created filename in the destination uri location (default: weights_filename) + :param auto_delete_file: delete temporary file after uploading + :return: uploaded uri for the weights package + """ + # create list of files + if (not weights_filenames and not weights_path) or (weights_filenames and weights_path): + raise ValueError('Model update weights package should get either directory path to pack or a list of files') + + if not weights_filenames: + weights_filenames = list(map(six.text_type, Path(weights_path).glob('*'))) + + # create packed model from all the files + fd, zip_file = mkstemp(prefix='model_package.', suffix='.zip') + try: + with zipfile.ZipFile(zip_file, 'w', allowZip64=True, compression=zipfile.ZIP_STORED) as zf: + for filename in weights_filenames: + zf.write(filename, arcname=Path(filename).name) + finally: + os.close(fd) + + # now we can delete the files (or path if provided) + if auto_delete_file: + def safe_remove(path, is_dir=False): + try: + (os.rmdir if is_dir else os.remove)(path) + except OSError: + self._log.info('Failed removing temporary {}'.format(path)) + + for filename in weights_filenames: + safe_remove(filename) + if weights_path: + safe_remove(weights_path, is_dir=True) + + if target_filename and not target_filename.lower().endswith('.zip'): + target_filename += '.zip' + + # and now we should upload the file, always delete the temporary zip file + comment = self.comment or '' + iteration_msg = 'snapshot {} stored'.format(str(weights_filenames)) + if not comment.startswith('\n'): + comment = '\n' + comment + comment = iteration_msg + comment + self.comment = comment + uploaded_uri = self.update_weights(weights_filename=zip_file, auto_delete_file=True, upload_uri=upload_uri, + target_filename=target_filename or 'model_package.zip', + iteration=iteration, update_comment=False) + # set the model tag (by now we should have a model object) so we know we have packaged file + self._set_package_tag() + return uploaded_uri + + def update_design(self, config_text=None, config_dict=None): + """ + Update the model configuration, basically store a blob of text for custom usage + + Notice: this is done in a lazily, only when updating weights we force the update of configuration in the backend + + :param config_text: model configuration (unconstrained text string). usually the content of a config_dict file. + If `config_text` is not None, `config_dict` must not be provided. + :param config_dict: model configuration parameters (dict). + If `config_dict` is not None, `config_text` must not be provided. 
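And a sketch of the packaging variant, update_weights_package(); the directory path is a placeholder:

uri = output_model.update_weights_package(
    weights_path='/tmp/checkpoint_dir/',           # every file in the directory is zipped
    target_filename='checkpoint_epoch_3.zip',
)
# note: with the default auto_delete_file=True the packed source files are deleted after zipping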
+ :return: True if update was successful + """ + if not self._validate_update(): + return + + config_text = self._resolve_config(config_text=config_text, config_dict=config_dict) + + if self._task: + self._task.set_model_config(config_text=config_text) + + if self.id: + # update the model object (this will happen if we resumed a training task) + result = self._get_force_base_model().update(design=config_text, task_id=self._task.id) + else: + self._floating_data.design = _Model._wrap_design(config_text) + result = Waitable() + + # you can wait on this object + return result + + def update_labels(self, labels): + """ + Update the model label enumeration {str(label): integer(id)} + + :param labels: dictionary with labels (text) keys and values as integers + example: {'background': 0 , 'person': 1} + :return: + """ + validate_dict(labels, key_types=six.string_types, value_types=six.integer_types, desc='label enumeration') + + if not self._validate_update(): + return + + if self._task: + self._task.set_model_label_enumeration(labels) + + if self.id: + # update the model object (this will happen if we resumed a training task) + result = self._get_force_base_model().update(labels=labels, task_id=self._task.id) + else: + self._floating_data.labels = labels + result = Waitable() + + # you can wait on this object + return result + + @classmethod + def wait_for_uploads(cls, timeout=None, max_num_uploads=None): + """ + Wait for any pending/in-progress model uploads. If no uploads are pending or in-progress, returns immediately. + + :param timeout: If not None, a floating point number specifying a timeout in seconds after which this call will + return. + :param max_num_uploads: Max number of uploads to wait for. + """ + _Model.wait_for_results(timeout=timeout, max_num_uploads=max_num_uploads) + + def _get_force_base_model(self): + if self._base_model: + return self._base_model + + # create a new model from the task + self._base_model = self._task.create_output_model() + # update the model from the task inputs + labels = self._task.get_labels_enumeration() + config_text = self._task.get_model_config_text() + parent = self._task.output_model_id or self._task.input_model_id + self._base_model.update( + labels=labels, + design=config_text, + task_id=self._task.id, + project_id=self._task.project, + parent_id=parent, + name=self._floating_data.name or self._task.name, + comment=self._floating_data.comment, + tags=self._floating_data.tags, + framework=self._floating_data.framework, + upload_storage_uri=self._floating_data.upload_storage_uri + ) + + # remove model floating change set, by now they should have matched the task. 
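A small sketch tying the update helpers above together (values are illustrative):

output_model.update_design(config_dict={'num_classes': 3})     # or config_text=...
output_model.update_labels({'background': 0, 'person': 1, 'dog': 2})

# optionally block until background weight uploads finish, e.g. before the process exits
OutputModel.wait_for_uploads(timeout=120.0)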
+ self._floating_data = None + + # now we have to update the creator task so it points to us + self._base_model.update_for_task(task_id=self._task.id, override_model_id=self.id) + + return self._base_model + + def _get_base_model(self): + if self._floating_data: + return self._floating_data + return self._get_force_base_model() + + def _get_model_data(self): + if self._base_model: + return self._base_model.data + return self._floating_data + + def _validate_update(self): + # test if we can update the model + if self.id and self.published: + raise ValueError('Model is published and cannot be changed') + + return True + + +class Waitable(object): + def wait(self, *_, **__): + return True diff --git a/trains/storage/__init__.py b/trains/storage/__init__.py new file mode 100644 index 00000000..bfce2dca --- /dev/null +++ b/trains/storage/__init__.py @@ -0,0 +1,2 @@ +""" Local and remote storage support """ +from .helper import StorageHelper diff --git a/trains/storage/helper.py b/trains/storage/helper.py new file mode 100644 index 00000000..b58db14b --- /dev/null +++ b/trains/storage/helper.py @@ -0,0 +1,1361 @@ +import getpass +import json +import os +import threading +from _socket import gethostname +from concurrent.futures import ThreadPoolExecutor +from copy import copy +from datetime import datetime +from multiprocessing.pool import ThreadPool +from time import time +from types import GeneratorType + +import boto3 +import botocore.client +import numpy as np +import requests +import six +from ..backend_api.utils import get_http_session_with_retry +from ..backend_config.bucket_config import S3BucketConfigurations, GSBucketConfigurations +from attr import attrs, attrib, asdict +from botocore.exceptions import ClientError +from furl import furl +from libcloud.common.types import ProviderError, LibcloudError +from libcloud.storage.providers import get_driver +from libcloud.storage.types import Provider +from pathlib2 import Path +from six import binary_type +from six.moves.queue import Queue, Empty +from six.moves.urllib.parse import urlparse +from six.moves.urllib.request import url2pathname + +from ..config import config +from ..debugging import get_logger +from ..errors import UsageError + +log = get_logger('storage') +level = config.get('storage.log.level', None) + +if level: + try: + log.setLevel(level) + except (TypeError, ValueError): + log.error('invalid storage log level in configuration: %s' % level) + +upload_pool = ThreadPool(processes=1) + + +class StorageError(Exception): + pass + + +class _DownloadProgressReport(object): + def __init__(self, total_size, verbose, remote_path, report_chunk_size_mb, log): + self._total_size = total_size + self._verbose = verbose + self.downloaded_mb = 0. + self._report_chunk_size = report_chunk_size_mb + self._log = log + self.last_reported = 0. + self._tic = time() + self._remote_path = remote_path + + def __call__(self, chunk_size): + chunk_size /= 1024. * 1024. + self.downloaded_mb += chunk_size + last_part = self.downloaded_mb - self.last_reported + + if self._verbose or (last_part >= self._report_chunk_size): + speed = last_part / (time() - self._tic) + self._tic = time() + self.last_reported = self.downloaded_mb + self._log.info('Downloading: %.0fMB / %.2fMb @ %.2fMbs from %s' % + (self.downloaded_mb, self._total_size, speed, self._remote_path)) + + +class StorageHelper(object): + """ Storage helper. + Used by the entire system to download/upload files. 
+ Supports both local and remote files (currently local files, network-mapped files, HTTP/S and Amazon S3) + """ + _temp_download_suffix = '.partially' + + @attrs + class _PathSubstitutionRule(object): + registered_prefix = attrib(type=str) + local_prefix = attrib(type=str) + replace_windows_sep = attrib(type=bool) + replace_linux_sep = attrib(type=bool) + + path_substitution_config = 'storage.path_substitution' + + @classmethod + def load_list_from_config(cls): + rules_list = [] + for index, sub_config in enumerate(config.get(cls.path_substitution_config, list())): + rule = cls( + registered_prefix=sub_config.get('registered_prefix', None), + local_prefix=sub_config.get('local_prefix', None), + replace_windows_sep=sub_config.get('replace_windows_sep', False), + replace_linux_sep=sub_config.get('replace_linux_sep', False), + ) + + if any(prefix is None for prefix in (rule.registered_prefix, rule.local_prefix)): + log.warning( + "Illegal substitution rule configuration '{}[{}]': {}".format( + cls.path_substitution_config, + index, + asdict(rule), + )) + + continue + + if all((rule.replace_windows_sep, rule.replace_linux_sep)): + log.warning( + "Only one of replace_windows_sep and replace_linux_sep flags may be set." + "'{}[{}]': {}".format( + cls.path_substitution_config, + index, + asdict(rule), + )) + continue + + rules_list.append(rule) + + return rules_list + + class _UploadData(object): + @property + def src_path(self): + return self._src_path + + @property + def dest_path(self): + return self._dest_path + + @property + def extra(self): + return self._extra + + @property + def callback(self): + return self._callback + + def __init__(self, src_path, dest_path, extra, callback): + self._src_path = src_path + self._dest_path = dest_path + self._extra = extra + self._callback = callback + + def __str__(self): + return "src=%s" % self.src_path + + _helpers = {} # cache of helper instances + + # global terminate event for async upload threads + _terminate = threading.Event() + _async_upload_threads = set() + + # collect all bucket credentials that aren't empty (ignore entries with an empty key or secret) + _s3_configurations = S3BucketConfigurations.from_config(config.get('aws.s3')) + _gs_configurations = GSBucketConfigurations.from_config(config.get('google.storage', default=None)) + + _path_substitutions = _PathSubstitutionRule.load_list_from_config() + + _bucket_location_failure_reported = set() + + @property + def log(self): + return self._log + + @property + def scheme(self): + return self._scheme + + @property + def secure(self): + return self._secure + + @property + def base_url(self): + return self._base_url + + @classmethod + def get(cls, url, logger=None, **kwargs): + """ Get a storage helper instance for the given URL """ + + # Handle URL substitution etc before locating the correct storage driver + url = cls._canonize_url(url) + + # Get the credentials we should use for this url + base_url = cls._resolve_base_url(url) + + instance_key = '%s_%s' % (base_url, threading.current_thread().ident or 0) + + force_create = kwargs.pop('__force_create', False) + if (instance_key in cls._helpers) and (not force_create): + return cls._helpers[instance_key] + + # Don't canonize URL since we already did it + instance = cls(base_url=base_url, url=url, logger=logger, canonize_url=False, **kwargs) + cls._helpers[instance_key] = instance + return instance + + def __init__(self, base_url, url, key=None, secret=None, region=None, verbose=False, logger=None, retries=5, + **kwargs): + self._log = 
logger or log + self._verbose = verbose + self._retries = retries + self._extra = {} + self._base_url = base_url + self._secure = True + self._driver = None + self._container = None + self._conf = None + + if kwargs.get('canonize_url', True): + url = self._canonize_url(url) + + parsed = urlparse(url) + self._scheme = parsed.scheme + if self._scheme == 'libcloud-s3': + self._conf = copy(self._s3_configurations.get_config_by_uri(url)) + self._secure = self._conf.secure + + final_region = region if region else self._conf.region + if not final_region: + final_region = None + + self._conf.update( + key=key or self._conf.key, + secret=secret or self._conf.secret, + multipart=self._conf.multipart, + region=final_region, + ) + + if not self._conf.key or not self._conf.secret: + raise ValueError('Missing key and secret for S3 storage access (%s)' % base_url) + + def init_driver_and_container(host, port, bucket): + s3_region_to_libcloud_driver = { + None: Provider.S3, + "": Provider.S3, + 'us-east-1': Provider.S3, + 'ap-northeast': Provider.S3_AP_NORTHEAST, + 'ap-northeast-1': Provider.S3_AP_NORTHEAST1, + 'ap-northeast-2': Provider.S3_AP_NORTHEAST2, + 'ap-south': Provider.S3_AP_SOUTH, + 'ap-south-1': Provider.S3_AP_SOUTH, + 'ap-southeast': Provider.S3_AP_SOUTHEAST, + 'ap-southeast-1': Provider.S3_AP_SOUTHEAST, + 'ap-southeast-2': Provider.S3_AP_SOUTHEAST2, + 'ca-central': Provider.S3_CA_CENTRAL, + 'cn-north': Provider.S3_CN_NORTH, + 'eu-west': Provider.S3_EU_WEST, + 'eu-west-1': Provider.S3_EU_WEST, + 'eu-west-2': Provider.S3_EU_WEST2, + 'eu-central': Provider.S3_EU_CENTRAL, + 'eu-central-1': Provider.S3_EU_CENTRAL, + 'sa-east': Provider.S3_SA_EAST, + 'sa-east-1': Provider.S3_SA_EAST, + 'us-east-2': Provider.S3_US_EAST2, + 'us-west': Provider.S3_US_WEST, + 'us-west-1': Provider.S3_US_WEST, + 'us-west-2': Provider.S3_US_WEST_OREGON, + 'us-west-oregon': Provider.S3_US_WEST_OREGON, + 'us-gov-west': Provider.S3_US_GOV_WEST, + 'rgw': Provider.S3_RGW, + 'rgw_outscale': Provider.S3_RGW_OUTSCALE, + } + + driver_name = s3_region_to_libcloud_driver.get( + self._conf.region or self._get_bucket_region(self._conf, self._log) + ) + + if not driver_name: + self._log.error("Invalid S3 region `%s`: no driver found" % self._conf.region) + raise ValueError("Invalid s3 region") + + host = host or None + port = port or None + + try: + driver = get_driver(driver_name)( + self._conf.key, + self._conf.secret, + host=host, + port=port, + secure=self._secure, + region=self._conf.region, + ) + + driver.supports_s3_multipart_upload = self._conf.multipart + container = driver.get_container(container_name=bucket) + + except LibcloudError: + attempted_uri = str(furl(host=host, port=port, path=bucket)).strip('/') + self._log.error( + 'Could not create S3 driver for {} in region {}'.format(attempted_uri, self._conf.region)) + raise + + return driver, container + + parts = Path(parsed.path.strip('/')).parts + first_part = parts[0] if parts else "" + if not self._conf.host and not self._conf.bucket: + # configuration has no indication of host or bucket, we'll just go with what we have + try: + self._driver, self._container = init_driver_and_container(parsed.netloc, None, first_part) + except Exception as e: + self._driver, self._container = init_driver_and_container(None, None, parsed.netloc) + else: + # configuration provides at least one of host/bucket + host, _, port = (self._conf.host or '').partition(':') + port = int(port) if port else None + bucket = self._conf.bucket or first_part + self._driver, self._container = 
init_driver_and_container(host, port, bucket) + + if self._conf.acl: + self._extra['acl'] = self._conf.acl + + elif self._scheme == 's3': + self._conf = copy(self._s3_configurations.get_config_by_uri(url)) + self._secure = self._conf.secure + + final_region = region if region else self._conf.region + if not final_region: + final_region = None + + self._conf.update( + key=key or self._conf.key, + secret=secret or self._conf.secret, + multipart=self._conf.multipart, + region=final_region, + ) + + if not self._conf.key or not self._conf.secret: + raise ValueError('Missing key and secret for S3 storage access (%s)' % base_url) + + self._driver = _Boto3Driver() + self._container = self._driver.get_container(container_name=self._base_url, retries=retries, + config=self._conf) + + elif self._scheme == _GoogleCloudStorageDriver.scheme: + self._conf = copy(self._gs_configurations.get_config_by_uri(url)) + self._driver = _GoogleCloudStorageDriver() + self._container = self._driver.get_container( + container_name=self._base_url, + config=self._conf + ) + + elif self._scheme in ('http', 'https'): + self._driver = _HttpDriver(retries=retries) + self._container = self._driver.get_container(container_name=self._base_url) + else: # elif self._scheme == 'file': + # if this is not a known scheme assume local file + + # If the scheme is file, use only the path segment, If not, use the entire URL + if self._scheme == 'file': + url = parsed.path + + url = url.replace("\\", "/") + + # url2pathname is specifically intended to operate on (urlparse result).path + # and returns a cross-platform compatible result + driver_uri = url2pathname(url) + if Path(driver_uri).is_file(): + driver_uri = str(Path(driver_uri).parent) + elif not Path(driver_uri).exists(): + # assume a folder and create + Path(driver_uri).mkdir(parents=True, exist_ok=True) + + self._driver = get_driver(Provider.LOCAL)(driver_uri) + self._container = self._driver.get_container(container_name='.') + + @classmethod + def terminate_uploads(cls, force=True, timeout=2.0): + if force: + # since async uploaders are daemon threads, we can just return and let them close by themselves + return + # signal all threads to terminate and give them a chance for 'timeout' seconds (total, not per-thread) + cls._terminate.set() + remaining_timeout = timeout + for thread in cls._async_upload_threads: + t = time() + try: + thread.join(timeout=remaining_timeout) + except Exception: + pass + remaining_timeout -= (time() - t) + + @classmethod + def get_configuration(cls, bucket_config): + return cls._s3_configurations.get_config_by_bucket(bucket_config.bucket, bucket_config.host) + + @classmethod + def add_configuration(cls, bucket_config, log=None, _test_config=True): + # Try to use existing configuration if we have no key and secret + use_existing = not bucket_config.is_valid() + + # Get existing config anyway (we'll either try to use it or alert we're replacing it + existing = cls.get_configuration(bucket_config) + + configs = cls._s3_configurations + + if not use_existing: + # Test bucket config, fails if unsuccessful + if _test_config: + cls._test_bucket_config(bucket_config, log) + + if existing: + if log: + log.warning('Overriding existing configuration for %s/%s' + % (existing.host or 'AWS', existing.bucket)) + configs.remove_config(existing) + else: + # Try to use existing configuration + good_config = False + if existing: + if log: + log.info('Using existing credentials for bucket %s/%s' + % (bucket_config.host or 'AWS', bucket_config.bucket)) + good_config = 
cls._test_bucket_config(existing, log, raise_on_error=False) + + if not good_config: + # Try to use global key/secret + configs.update_config_with_defaults(bucket_config) + + if log: + log.info('Using global credentials for bucket %s/%s' + % (bucket_config.host or 'AWS', bucket_config.bucket)) + if _test_config: + cls._test_bucket_config(bucket_config, log) + else: + # do not add anything, existing config is OK + return + + configs.add_config(bucket_config) + + @classmethod + def add_path_substitution( + cls, + registered_prefix, + local_prefix, + replace_windows_sep=False, + replace_linux_sep=False, + ): + """ + Add a path substitution rule for storage paths. + + Useful for case where the data was registered under some path, and that + path was later renamed. This may happen with local storage paths where + each machine is has different mounts or network drives configurations + + :param registered_prefix: The prefix to search for and replace. This is + the prefix of the path the data is registered under. This should be the + exact url prefix, case sensitive, as the data is registered. + :param local_prefix: The prefix to replace 'registered_prefix' with. This + is the prefix of the path the data is actually saved under. This should be the + exact url prefix, case sensitive, as the data is saved under. + :param replace_windows_sep: If set to True, and the prefix matches, the rest + of the url has all of the windows path separators (backslash '\') replaced with + the native os path separator. + :param replace_linux_sep: If set to True, and the prefix matches, the rest + of the url has all of the linux/unix path separators (slash '/') replaced with + the native os path separator. + """ + + if not registered_prefix or not local_prefix: + raise UsageError("Path substitution prefixes must be non empty strings") + + if replace_windows_sep and replace_linux_sep: + raise UsageError("Only one of replace_windows_sep and replace_linux_sep may be set.") + + rule = cls._PathSubstitutionRule( + registered_prefix=registered_prefix, + local_prefix=local_prefix, + replace_windows_sep=replace_windows_sep, + replace_linux_sep=replace_linux_sep, + ) + + cls._path_substitutions.append(rule) + + @classmethod + def clear_path_substitutions(cls): + """ + Removes all path substitution rules, including ones from the configuration file. + """ + cls._path_substitutions = list() + + def verify_upload(self, folder_uri='', raise_on_error=True, log_on_error=True): + """ + Verify that this helper can upload files to a folder. + + An upload is possible iff: + 1. the destination folder is under the base uri of the url used to create the helper + 2. the helper has credentials to write to the destination folder + + :param folder_uri: The destination folder to test. Must be an absolute + url that begins with the base uri of the url used to create the helper. + :param raise_on_error: Raise an exception if an upload is not possible + :param log_on_error: Log an error if an upload is not possible + :return: True iff an upload to folder_uri is possible. 
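An illustrative call to the substitution rule described above; both prefixes are placeholders for a case where data was registered from a Windows machine but is read on Linux:

from trains.storage import StorageHelper

StorageHelper.add_path_substitution(
    registered_prefix='C:\\datasets\\',     # prefix the data was registered under
    local_prefix='/mnt/datasets/',          # where the same data is mounted here
    replace_windows_sep=True,               # also convert '\' separators in the remainder of each path
)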
+ """ + + folder_uri = self._canonize_url(folder_uri) + + folder_uri = self.conform_url(folder_uri, self._base_url) + + test_path = self._normalize_object_name(folder_uri) + + if self._scheme == 's3': + self._test_bucket_config( + self._conf, + self._log, + test_path=test_path, + raise_on_error=raise_on_error, + log_on_error=log_on_error, + ) + elif self._scheme == _GoogleCloudStorageDriver.scheme: + self._driver.test_upload(test_path, self._conf) + + elif self._scheme == 'file': + # Check path exists + Path(test_path).mkdir(parents=True, exist_ok=True) + # check path permissions + Path(test_path).touch(exist_ok=True) + + return folder_uri + + def upload_from_stream(self, stream, dest_path, extra=None): + dest_path = self._canonize_url(dest_path) + object_name = self._normalize_object_name(dest_path) + extra = extra.copy() if extra else {} + extra.update(self._extra) + self._driver.upload_object_via_stream( + iterator=stream, + container=self._container, + object_name=object_name, + extra=extra) + + return dest_path + + def upload(self, src_path, dest_path=None, extra=None, async_enable=False, cb=None): + if not dest_path: + dest_path = os.path.basename(src_path) + + dest_path = self._canonize_url(dest_path) + + if async_enable: + data = self._UploadData(src_path=src_path, dest_path=dest_path, extra=extra, callback=cb) + return upload_pool.apply_async(self._do_async_upload, args=(data,)) + else: + return self._do_upload(src_path, dest_path, extra, cb, verbose=False) + + def list(self, prefix=None): + """ + List entries in the helper base path. + + Return a list of names inside this helper base path. The base path is + determined at creation time and is specific for each storage medium. + For Google Storage and S3 it is the bucket of the path. + For local files it is the root directory. + + This operation is not supported for http and https protocols. + + :param prefix: If None, return the list as described above. If not, it + must be a string - the path of a sub directory under the base path. + the returned list will include only objects under that subdir. + + :return: List of strings - the paths of all the objects in the storage base + path under prefix. Listed relative to the base path. 
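A minimal end-to-end sketch of the helper API (bucket, paths, and file names are placeholders; S3 credentials are assumed to be configured, and download_to_file is defined just below):

from trains.storage import StorageHelper

helper = StorageHelper.get('s3://my-bucket/datasets/')
helper.upload('/tmp/train.npz', dest_path='train.npz')         # synchronous upload
names = helper.list(prefix='datasets/')                        # object names under the prefix
local_copy = helper.download_to_file(
    remote_path='s3://my-bucket/datasets/train.npz',
    local_path='/tmp/train_copy.npz',
    overwrite_existing=True,
)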
+ + """ + + if prefix: + if prefix.startswith(self._base_url): + prefix = prefix[len(self.base_url):].lstrip("/") + + try: + res = self._driver.list_container_objects(self._container, ex_prefix=prefix) + except TypeError: + res = self._driver.list_container_objects(self._container) + + return [ + obj.name + for obj in res if + obj.name.startswith(prefix) and obj.name != prefix + ] + else: + return [obj.name for obj in self._driver.list_container_objects(self._container)] + + def download_to_file(self, remote_path, local_path, overwrite_existing=False, delete_on_failure=True): + def next_chunk(astream): + _tic = time() + if isinstance(astream, binary_type): + chunk = astream + astream = None + elif astream: + try: + chunk = next(astream) + except StopIteration: + chunk = None + else: + chunk = None + _tic = time() - _tic + return chunk, astream, _tic + + remote_path = self._canonize_url(remote_path) + + temp_local_path = None + try: + if self._verbose: + self._log.info('Start downloading from %s' % remote_path) + if not overwrite_existing and Path(local_path).is_file(): + self._log.warn( + 'File {} already exists, no need to download, thread id = {}'.format( + local_path, + threading.current_thread().ident, + ), + ) + + return local_path + # we download into temp_local_path so that if we accidentally stop in the middle, + # we won't think we have the entire file + temp_local_path = '{}_{}{}'.format(local_path, time(), self._temp_download_suffix) + obj = self._get_object(remote_path) + + # object size in bytes + total_size_mb = -1 + dl_total_mb = 0. + download_reported = False + # chunks size is ignored and always 5Mb + chunk_size_mb = 5 + + # try to get file size + try: + if isinstance(self._driver, _HttpDriver) and obj: + total_size_mb = float(obj.headers.get('Content-Length', 0)) / (1024 * 1024) + elif hasattr(obj, 'size'): + size = obj.size + # Google storage has the option to reload the object to get the size + if size is None and hasattr(obj, 'reload'): + obj.reload() + size = obj.size + + total_size_mb = 0 if size is None else float(size) / (1024 * 1024) + elif hasattr(obj, 'content_length'): + total_size_mb = float(obj.content_length) / (1024 * 1024) + except (ValueError, AttributeError, KeyError): + pass + + # if driver supports download with call back, use it (it might be faster) + if hasattr(self._driver, 'download_object'): + # callback + cb = _DownloadProgressReport(total_size_mb, self._verbose, + remote_path, chunk_size_mb, self._log) + self._driver.download_object(obj, temp_local_path, callback=cb) + download_reported = bool(cb.last_reported) + dl_total_mb = cb.downloaded_mb + else: + stream = self._driver.download_object_as_stream(obj, chunk_size_mb * 1024 * 1024) + if stream is None: + raise ValueError('Could not download %s' % remote_path) + with open(temp_local_path, 'wb') as fd: + data, stream, tic = next_chunk(stream) + while data: + fd.write(data) + dl_rate = len(data) / float(1024 * 1024 * tic + 0.000001) + dl_total_mb += len(data) / float(1024 * 1024) + # report download if we are on the second chunk + if self._verbose or (dl_total_mb * 0.9 > chunk_size_mb): + download_reported = True + self._log.info('Downloading: %.0fMB / %.2fMb @ %.2fMbs from %s' % + (dl_total_mb, total_size_mb, dl_rate, remote_path)) + data, stream, tic = next_chunk(stream) + + # remove target local_path if already exists + try: + os.remove(local_path) + except: + pass + + if Path(temp_local_path).stat().st_size <= 0: + raise Exception('downloaded a 0-sized file') + + # rename temp file to 
local_file + os.rename(temp_local_path, local_path) + # report download if we are on the second chunk + if self._verbose or download_reported: + self._log.info( + 'Downloaded %.2f MB successfully from %s , saved to %s' % (dl_total_mb, remote_path, local_path)) + return local_path + except Exception as e: + self._log.error("Could not download %s , err: %s " % (remote_path, str(e))) + if delete_on_failure: + try: + if temp_local_path: + os.remove(temp_local_path) + except: + pass + return None + + def download_as_stream(self, remote_path, chunk_size=None): + remote_path = self._canonize_url(remote_path) + try: + obj = self._get_object(remote_path) + return self._driver.download_object_as_stream(obj, chunk_size=chunk_size) + except Exception as e: + self._log.error("Could not download file : %s, err:%s " % (remote_path, str(e))) + return None + + def download_as_nparray(self, remote_path, chunk_size=None): + try: + stream = self.download_as_stream(remote_path, chunk_size) + if stream is None: + return + + # TODO: ugly py3 hack, please remove ASAP + if six.PY3 and not isinstance(stream, GeneratorType): + return np.frombuffer(stream, dtype=np.uint8) + else: + return np.asarray(bytearray(b''.join(stream)), dtype=np.uint8) + + except Exception as e: + self._log.error("Could not download file : %s, err:%s " % (remote_path, str(e))) + + def delete(self, path): + return self._driver.delete_object(self._get_object(path)) + + @classmethod + def _canonize_url(cls, url): + return cls._apply_url_substitutions(url) + + @classmethod + def _apply_url_substitutions(cls, url): + def replace_separator(_url, where, sep): + return _url[:where] + _url[where:].replace(sep, os.sep) + + for index, rule in enumerate(cls._path_substitutions): + if url.startswith(rule.registered_prefix): + url = url.replace( + rule.registered_prefix, + rule.local_prefix, + 1, # count. str.replace() does not support keyword arguments + ) + + if rule.replace_windows_sep: + url = replace_separator(url, len(rule.local_prefix), '\\') + + if rule.replace_linux_sep: + url = replace_separator(url, len(rule.local_prefix), '/') + + break + + return url + + @classmethod + def _get_bucket_region(cls, conf, log=None, report_info=False): + if not conf.bucket: + return None + + def report(msg): + if log and conf.get_bucket_host() not in cls._bucket_location_failure_reported: + if report_info: + log.debug(msg) + else: + log.warning(msg) + cls._bucket_location_failure_reported.add(conf.get_bucket_host()) + + try: + boto_session = boto3.Session(conf.key, conf.secret) + boto_resource = boto_session.resource('s3') + return boto_resource.meta.client.get_bucket_location(Bucket=conf.bucket)["LocationConstraint"] + + except ClientError as ex: + report("Failed getting bucket location (region) for bucket " + "%s: %s (%s, access_key=%s). Default region will be used. " + "This is normal if you do not have GET_BUCKET_LOCATION permission" + % (conf.bucket, ex.response['Error']['Message'], ex.response['Error']['Code'], conf.key)) + except Exception as ex: + report("Failed getting bucket location (region) for bucket %s: %s. Default region will be used." 
+ % (conf.bucket, str(ex))) + + return None + + @classmethod + def _test_bucket_config(cls, conf, log, test_path='', raise_on_error=True, log_on_error=True): + if not conf.bucket: + return False + try: + if not conf.is_valid(): + raise Exception('Missing credentials') + + fullname = furl(conf.bucket).add(path=test_path).add(path='%s-upload_test' % cls.__module__) + bucket_name = str(fullname.path.segments[0]) + filename = str(furl(path=fullname.path.segments[1:])) + + data = { + 'user': getpass.getuser(), + 'machine': gethostname(), + 'time': datetime.utcnow().isoformat() + } + + boto_session = boto3.Session(conf.key, conf.secret) + boto_resource = boto_session.resource('s3', conf.region) + bucket = boto_resource.Bucket(bucket_name) + bucket.put_object(Key=filename, Body=six.b(json.dumps(data))) + + region = cls._get_bucket_region(conf=conf, log=log, report_info=True) + + if region and ((conf.region and region != conf.region) or (not conf.region and region != 'us-east-1')): + msg = "incorrect region specified for bucket %s (detected region %s)" % (conf.bucket, region) + else: + return True + + except ClientError as ex: + msg = ex.response['Error']['Message'] + if log_on_error and log: + log.error(msg) + + if raise_on_error: + raise + + except Exception as ex: + msg = str(ex) + if log_on_error and log: + log.error(msg) + + if raise_on_error: + raise + + msg = ("Failed testing access to bucket %s: " % conf.bucket) + msg + + if log_on_error and log: + log.error(msg) + + if raise_on_error: + raise StorageError(msg) + + return False + + @classmethod + def _resolve_base_url(cls, base_url): + parsed = urlparse(base_url) + if parsed.scheme == 's3': + conf = cls._s3_configurations.get_config_by_uri(base_url) + bucket = conf.bucket + if not bucket: + parts = Path(parsed.path.strip('/')).parts + if parts: + bucket = parts[0] + return '/'.join(x for x in ('s3:/', conf.host, bucket) if x) + elif parsed.scheme == _GoogleCloudStorageDriver.scheme: + conf = cls._gs_configurations.get_config_by_uri(base_url) + return str(furl(scheme=parsed.scheme, netloc=conf.bucket)) + elif parsed.scheme == 'http': + return 'http://' + elif parsed.scheme == 'https': + return 'https://' + else: # if parsed.scheme == 'file': + # if we do not know what it is, we assume file + return 'file://' + + @classmethod + def conform_url(cls, folder_uri, base_url=None): + if not folder_uri: + return folder_uri + _base_url = cls._resolve_base_url(folder_uri) if not base_url else base_url + + if not folder_uri.startswith(_base_url): + prev_folder_uri = folder_uri + if _base_url == 'file://': + folder_uri = str(Path(folder_uri).absolute()) + if folder_uri.startswith('/'): + folder_uri = _base_url + folder_uri + else: + folder_uri = '/'.join((_base_url, folder_uri)) + + log.debug('Upload destination {} amended to {} for registration purposes'.format( + prev_folder_uri, folder_uri)) + else: + raise ValueError('folder_uri: {} does not start with base url: {}'.format(folder_uri, _base_url)) + + return folder_uri + + def _absolute_object_name(self, path): + """ Returns absolute remote path, including any prefix that is handled by the container """ + if not path.startswith(self.base_url): + return self.base_url.rstrip('/') + '///' + path.lstrip('/') + return path + + def _normalize_object_name(self, path): + """ Normalize remote path. 
Remove any prefix that is already handled by the container """ + if path.startswith(self.base_url): + path = path[len(self.base_url):] + if path.startswith('/') and os.name == 'nt': + path = path[1:] + if self.scheme in ('s3', _GoogleCloudStorageDriver.scheme): + path = path.lstrip('/') + return path + + def _do_async_upload(self, data): + assert isinstance(data, self._UploadData) + return self._do_upload(data.src_path, data.dest_path, data.extra, data.callback, verbose=True) + + def _upload_from_file(self, local_path, dest_path, extra=None): + if not hasattr(self._driver, 'upload_object'): + with open(local_path, 'rb') as stream: + res = self.upload_from_stream(stream=stream, dest_path=dest_path, extra=extra) + else: + object_name = self._normalize_object_name(dest_path) + extra = extra.copy() if extra else {} + extra.update(self._extra) + res = self._driver.upload_object( + file_path=local_path, + container=self._container, + object_name=object_name, + extra=extra) + return res + + def _do_upload(self, src_path, dest_path, extra=None, cb=None, verbose=False): + object_name = self._normalize_object_name(dest_path) + if cb: + try: + cb(None) + except Exception as e: + self._log.error("Calling upload callback when starting upload: %s" % str(e)) + if verbose: + msg = "Starting upload: %s => %s" % (src_path, object_name) + if object_name.startswith('file://') or object_name.startswith('/'): + self._log.debug(msg) + else: + self._log.info(msg) + try: + self._upload_from_file(local_path=src_path, dest_path=dest_path, extra=extra) + except Exception as e: + # TODO - exception is xml, need to parse. + self._log.error("Exception encountered while uploading %s" % str(e)) + try: + cb(False) + except Exception as e: + self._log.warn("Exception on upload callback: %s" % str(e)) + raise + if verbose: + self._log.debug("Finished upload: %s => %s" % (src_path, object_name)) + if cb: + try: + cb(dest_path) + except Exception as e: + self._log.warn("Exception on upload callback: %s" % str(e)) + + return dest_path + + def _get_object(self, path): + object_name = self._normalize_object_name(path) + try: + return self._driver.get_object(container_name=self._container.name, object_name=object_name) + except ProviderError: + raise + except Exception as e: + self.log.exception('Storage helper problem for {}'.format(str(object_name))) + return None + + +class _HttpDriver(object): + """ LibCloud http/https adapter (simple, enough for now) """ + + class _Container(object): + def __init__(self, name, retries=5, **kwargs): + self.name = name + self.session = get_http_session_with_retry(total=retries) + + def __init__(self, retries=5): + self._retries = retries + self._containers = {} + + def get_container(self, container_name, *_, **kwargs): + if container_name not in self._containers: + self._containers[container_name] = self._Container(name=container_name, retries=self._retries, **kwargs) + return self._containers[container_name] + + def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs): + url = object_name[:object_name.index('/')] + url_path = object_name[len(url)+1:] + res = container.session.post(container.name+url, files={url_path: iterator}) + if res.status_code != requests.codes.ok: + raise ValueError('Failed uploading object %s (%d): %s' % (object_name, res.status_code, res.text)) + return res + + def list_container_objects(self, *args, **kwargs): + raise NotImplementedError('List is not implemented for http protocol') + + def delete_object(self, *args, **kwargs): + raise 
NotImplementedError('Delete is not implemented for http protocol') + + def get_object(self, container_name, object_name, *args, **kwargs): + container = self._containers[container_name] + # set stream flag before get request + container.session.stream = kwargs.get('stream', True) + res = container.session.get(''.join((container_name, object_name.lstrip('/')))) + if res.status_code != requests.codes.ok: + raise ValueError('Failed getting object %s (%d): %s' % (object_name, res.status_code, res.text)) + return res + + def download_object_as_stream(self, obj, chunk_size=64 * 1024): + # return iterable object + return obj.iter_content(chunk_size=chunk_size) + + def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None): + p = Path(local_path) + if not overwrite_existing and p.is_file(): + log.warn('failed saving after download: overwrite=False and file exists (%s)' % str(p)) + return + length = p.write_bytes(obj.content) + if callback: + try: + callback(length) + except Exception as e: + log.warn('Failed reporting downloaded file size for {}: {}'.format(p, e)) + + +class _Stream(object): + encoding = None + mode = 'rw' + name = '' + newlines = '\n' + softspace = False + + def __init__(self, input_iterator=None): + self.closed = False + self._buffer = Queue() + self._input_iterator = input_iterator + self._leftover = None + + def __iter__(self): + return self + + def __next__(self): + return self.next() + + def close(self): + self.closed = True + + def flush(self): + pass + + def fileno(self): + return 87 + + def isatty(self): + return False + + def next(self): + while not self.closed or not self._buffer.empty(): + # input stream + if self._input_iterator: + try: + chunck = next(self._input_iterator) + return chunck + except StopIteration: + self.closed = True + raise StopIteration() + except Exception as ex: + log.error('Failed downloading: %s' % ex) + else: + # in/out stream + try: + return self._buffer.get(block=True, timeout=1.) 
+ except Empty: + pass + + raise StopIteration() + + def read(self, size=None): + try: + data = self.next() if self._leftover is None else self._leftover + except StopIteration: + return six.b('') + + self._leftover = None + try: + while size is None or len(data) < size: + chunk = self.next() + if chunk is not None: + if data is not None: + data += chunk + else: + data = chunk + except StopIteration: + pass + + if size is not None and len(data) > size: + self._leftover = data[size:] + return data[:size] + + return data + + def readline(self, size=None): + return self.read(size) + + def readlines(self, sizehint=None): + pass + + def truncate(self, size=None): + pass + + def write(self, bytes): + self._buffer.put(bytes, block=True) + + def writelines(self, sequence): + for s in sequence: + self.write(s) + + +class _Boto3Driver(object): + """ Boto3 storage adapter (simple, enough for now) """ + _max_multipart_concurrency = config.get('aws.boto3.max_multipart_concurrency', 16) + + _min_pool_connections = 512 + _pool_connections = config.get('aws.boto3.pool_connections', 512) + + _stream_download_pool_connections = 128 + _stream_download_pool = None + + _containers = {} + + class _Container(object): + _creation_lock = threading.Lock() + + def __init__(self, name, cfg): + # skip 's3://' + self.name = name[5:] + endpoint = (('https://' if cfg.secure else 'http://') + cfg.host) if cfg.host else None + + # boto3 client creation isn't thread-safe (client itself is) + with self._creation_lock: + self.resource = boto3.resource( + 's3', + aws_access_key_id=cfg.key, + aws_secret_access_key=cfg.secret, + endpoint_url=endpoint, + use_ssl=cfg.secure, + config=botocore.client.Config( + max_pool_connections=max( + _Boto3Driver._min_pool_connections, + _Boto3Driver._pool_connections) + ), + ) + + self.config = cfg + bucket_name = self.name[len(cfg.host) + 1:] if cfg.host else self.name + self.bucket = self.resource.Bucket(bucket_name) + + @attrs + class ListResult(object): + name = attrib(default=None) + + def __init__(self): + pass + + def _get_stream_download_pool(self): + if self._stream_download_pool is None: + self._stream_download_pool = ThreadPoolExecutor(max_workers=self._stream_download_pool_connections) + return self._stream_download_pool + + def get_container(self, container_name, *_, **kwargs): + if container_name not in self._containers: + self._containers[container_name] = self._Container(name=container_name, cfg=kwargs.get('config')) + self._containers[container_name].config.retries = kwargs.get('retries', 5) + return self._containers[container_name] + + def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs): + stream = _Stream(iterator) + try: + container.bucket.upload_fileobj(stream, object_name, Config=boto3.s3.transfer.TransferConfig( + use_threads=container.config.multipart, + max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1, + num_download_attempts=container.config.retries)) + except Exception as ex: + log.error('Failed uploading: %s' % ex) + return False + return True + + def upload_object(self, file_path, container, object_name, extra=None, **kwargs): + try: + container.bucket.upload_file(file_path, object_name, Config=boto3.s3.transfer.TransferConfig( + use_threads=container.config.multipart, + max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1, + num_download_attempts=container.config.retries)) + except Exception as ex: + log.error('Failed uploading: %s' % ex) + return False + 
return True + + def list_container_objects(self, container, ex_prefix=None, **kwargs): + if ex_prefix: + res = container.bucket.objects.filter(Prefix=ex_prefix) + else: + res = container.bucket.objects.all() + for res in res: + yield self.ListResult(name=res.key) + + def delete_object(self, object, **kwargs): + object.delete() + + def get_object(self, container_name, object_name, *args, **kwargs): + full_container_name = 's3://' + container_name + container = self._containers[full_container_name] + obj = container.resource.Object(container.bucket.name, object_name) + obj.container_name = full_container_name + return obj + + def download_object_as_stream(self, obj, chunk_size=64 * 1024): + def async_download(a_obj, a_stream, cfg): + try: + a_obj.download_fileobj(a_stream, Config=cfg) + except Exception as ex: + log.error('Failed downloading: %s' % ex) + a_stream.close() + + # return iterable object + stream = _Stream() + container = self._containers[obj.container_name] + config = boto3.s3.transfer.TransferConfig( + use_threads=container.config.multipart, + max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1, + num_download_attempts=container.config.retries) + self._get_stream_download_pool().submit(async_download, obj, stream, config) + + return stream + + def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None): + p = Path(local_path) + if not overwrite_existing and p.is_file(): + log.warn('failed saving after download: overwrite=False and file exists (%s)' % str(p)) + return + container = self._containers[obj.container_name] + obj.download_file(str(p), + Callback=callback, + Config=boto3.s3.transfer.TransferConfig( + use_threads=container.config.multipart, + max_concurrency=self._max_multipart_concurrency if container.config.multipart else 1, + num_download_attempts=container.config.retries)) + + +class _GoogleCloudStorageDriver(object): + """Storage driver for google cloud storage""" + + _stream_download_pool_connections = 128 + _stream_download_pool = None + + _containers = {} + + scheme = 'gs' + scheme_prefix = str(furl(scheme=scheme, netloc='')) + + class _Container(object): + def __init__(self, name, cfg): + try: + from google.cloud import storage + from google.oauth2 import service_account + except ImportError: + raise UsageError( + 'Google cloud driver not found.' 
+ 'Please install driver using "pip install google-cloud-storage"' + ) + + self.name = name[len(_GoogleCloudStorageDriver.scheme_prefix):] + + if cfg.credentials_json: + credentials = service_account.Credentials.from_service_account_file(cfg.credentials_json) + else: + credentials = None + + self.client = storage.Client(project=cfg.project, credentials=credentials) + self.config = cfg + self.bucket = self.client.bucket(self.name) + + def _get_stream_download_pool(self): + if self._stream_download_pool is None: + self._stream_download_pool = ThreadPoolExecutor(max_workers=self._stream_download_pool_connections) + return self._stream_download_pool + + def get_container(self, container_name, *_, **kwargs): + if container_name not in self._containers: + self._containers[container_name] = self._Container(name=container_name, cfg=kwargs.get('config')) + self._containers[container_name].config.retries = kwargs.get('retries', 5) + return self._containers[container_name] + + def upload_object_via_stream(self, iterator, container, object_name, extra=None, **kwargs): + try: + blob = container.bucket.blob(object_name) + blob.upload_from_file(iterator) + except Exception as ex: + log.error('Failed uploading: %s' % ex) + return False + return True + + def upload_object(self, file_path, container, object_name, extra=None, **kwargs): + try: + blob = container.bucket.blob(object_name) + blob.upload_from_filename(file_path) + except Exception as ex: + log.error('Failed uploading: %s' % ex) + return False + return True + + def list_container_objects(self, container, **kwargs): + return list(container.bucket.list_blobs()) + + def delete_object(self, object, **kwargs): + object.delete() + + def get_object(self, container_name, object_name, *args, **kwargs): + full_container_name = str(furl(scheme=self.scheme, netloc=container_name)) + container = self._containers[full_container_name] + obj = container.bucket.blob(object_name) + obj.container_name = full_container_name + return obj + + def download_object_as_stream(self, obj, chunk_size=256 * 1024): + raise NotImplementedError('Unsupported for google storage') + + def async_download(a_obj, a_stream): + try: + a_obj.download_to_file(a_stream) + except Exception as ex: + log.error('Failed downloading: %s' % ex) + a_stream.close() + + # return iterable object + stream = _Stream() + obj.chunk_size = chunk_size + self._get_stream_download_pool().submit(async_download, obj, stream) + + return stream + + def download_object(self, obj, local_path, overwrite_existing=True, delete_on_failure=True, callback=None): + p = Path(local_path) + if not overwrite_existing and p.is_file(): + log.warn('failed saving after download: overwrite=False and file exists (%s)' % str(p)) + return + obj.download_to_filename(str(p)) + + def test_upload(self, test_path, config): + bucket_url = str(furl(scheme=self.scheme, netloc=config.bucket, path=config.subdir)) + bucket = self.get_container(container_name=bucket_url, config=config).bucket + + test_obj = bucket + + if test_path: + if not test_path.endswith('/'): + test_path += '/' + + blob = bucket.blob(test_path) + + if blob.exists(): + test_obj = blob + + permissions_to_test = ('storage.objects.get', 'storage.objects.update') + return set(test_obj.test_iam_permissions(permissions_to_test)) == set(permissions_to_test) diff --git a/trains/storage/util.py b/trains/storage/util.py new file mode 100644 index 00000000..a9f4e29e --- /dev/null +++ b/trains/storage/util.py @@ -0,0 +1,18 @@ +import six +import fnmatch + + +def 
get_config_object_matcher(**patterns): + unsupported = {k: v for k, v in patterns.items() if not isinstance(v, six.string_types)} + if unsupported: + raise ValueError('Unsupported object matcher (expecting string): %s' + % ', '.join('%s=%s' % (k, v) for k, v in unsupported.items())) + + def _matcher(**kwargs): + for key, value in kwargs.items(): + if not value: + continue + pat = patterns.get(key) + if pat and fnmatch.fnmatch(value, pat): + return True + return _matcher diff --git a/trains/task.py b/trains/task.py new file mode 100644 index 00000000..5d4094ec --- /dev/null +++ b/trains/task.py @@ -0,0 +1,1192 @@ +import atexit +import os +import sys +import threading +import time +import warnings +from argparse import ArgumentParser +from collections import OrderedDict, Callable + +import psutil +import six +from .backend_api.services import tasks, projects +from six.moves._thread import start_new_thread + +from .backend_interface import TaskStatusEnum +from .backend_interface.model import Model as BackendModel +from .backend_interface.task.args import _Arguments +from .backend_interface.task.development.stop_signal import TaskStopSignal +from .backend_interface.task.development.worker import DevWorker +from .backend_interface.task.repo import pip_freeze, ScriptInfo +from .backend_interface.util import get_single_result, exact_match_regex, make_message +from .config import config, PROC_MASTER_ID_ENV_VAR +from .debugging.log import LoggerRoot +from .errors import UsageError +from .task_parameters import TaskParameters +from .utilities.args import argparser_parseargs_called, get_argparser_last_args, \ + argparser_update_currenttask +from .utilities.matplotlib_bind import PatchedMatplotlib +from .utilities.seed import make_deterministic +from .utilities.absl_bind import PatchAbsl +from .utilities.frameworks import PatchSummaryToEventTransformer, PatchModelCheckPointCallback, \ + PatchTensorFlowEager, PatchKerasModelIO, PatchTensorflowModelIO, PatchPyTorchModelIO +from .backend_interface.task import Task as _Task +from .config import running_remotely, get_remote_task_id +from .config.cache import SessionCache +from .logger import Logger +from .model import InputModel, OutputModel, ARCHIVED_TAG + +NotSet = object() + + +class Task(_Task): + """ + Task (experiment) object represents the current running experiments and connects all the different parts into \ + a fully reproducible experiment + + Common usage is calling Task.init() to initialize the main task. + The main task is development / remote execution mode-aware, and supports connecting various SDK objects + such as Models etc. In development mode, the main task supports task reuse (see Task.init() for more + information in development mode features). + Any subsequent call to Task.init() will return the already-initialized main task + and will not create a new main task. + + Sub-tasks, meaning tasks which are not the main task and are not development / remote execution mode aware, can be + created using Task.create(). These tasks do no support task reuse and any call + to Task.create() will always create a new task. + + You can also query existing tasks in the system by calling Task.get_task(). 
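+
+     A minimal usage sketch (illustrative; the project name and task name below are placeholders):
+
+         from trains import Task
+
+         task = Task.init(project_name='examples', task_name='my experiment')
+         logger = task.get_logger()
+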
+ + **Usage: Task.init(...), Task.create() or Task.get_task(...)** + """ + + TaskTypes = tasks.TaskTypeEnum + + __create_protection = object() + __main_task = None + __exit_hook = None + __task_id_reuse_time_window_in_hours = float(config.get('development.task_reuse_time_window_in_hours', 24.0)) + __store_diff_on_train = config.get('development.store_uncommitted_code_diff_on_train', False) + __detect_repo_async = config.get('development.vcs_repo_detect_async', False) + + class _ConnectedParametersType(object): + argparse = "argument_parser" + dictionary = "dictionary" + task_parameters = "task_parameters" + + @classmethod + def _options(cls): + return { + var for var, val in vars(cls).items() + if isinstance(val, six.string_types) + } + + def __init__(self, private=None, **kwargs): + """ + **Do not construct Task manually!** + + please use Task.current_task() or Task.get_task(id=, project=, name=) + """ + if private is not Task.__create_protection: + raise UsageError( + 'Task object cannot be instantiated externally, use Task.current_task() or Task.get_task(...)') + + super(Task, self).__init__(**kwargs) + self._arguments = _Arguments(self) + self._logger = None + self._last_input_model_id = None + self._connected_output_model = None + self._dev_worker = None + self._dev_stop_signal = None + self._dev_mode_periodic_flag = False + self._connected_parameter_type = None + # register atexit, so that we mark the task as stopped + self._at_exit_called = False + self.__register_at_exit(self._at_exit) + + @classmethod + def current_task(cls): + """ + Return the Current Task object for the main execution task (task context). + :return: Task() object or None + """ + return cls.__main_task + + @classmethod + def init( + cls, + project_name=None, + task_name=None, + task_type=TaskTypes.training, + reuse_last_task_id=True, + output_uri=None, + auto_connect_arg_parser=True, + auto_connect_frameworks=True, + **kwargs + ): + """ + Return the Task object for the main execution task (task context). + + :param project_name: project to create the task in (if project doesn't exist, it will be created) + :param task_name: task name to be created (in development mode, not when running remotely) + :param task_type: task type to be created (in development mode, not when running remotely) + :param reuse_last_task_id: start with the previously used task id (stored in the data cache folder). \ + if False every time we call the function we create a new task with the same name \ + Notice! The reused task will be reset. (when running remotely, the usual behaviour applies) \ + Note: A closed or published task will not be reused, and a new task will be created. + :param output_uri: Default location for output models (currently support folder/S3/GS/ ). + notice: sub-folders (task_id) is created in the destination folder for all outputs. + :param auto_connect_arg_parser: Automatically grab the ArgParser and connect it with the task. 
+ if set to false, you can manually connect the ArgParser with task.connect(parser) + :param auto_connect_frameworks: If true automatically patch MatplotLib, Keras callbacks, and TensorBoard/X to + serialize plots, graphs and model location to trains backend (in addition to original output destination) + :return: Task() object + """ + + def verify_defaults_match(): + validate = [ + ('project name', project_name, cls.__main_task.get_project_name()), + ('task name', task_name, cls.__main_task.name), + ('task type', task_type, cls.__main_task.task_type), + ] + + for field, default, current in validate: + if default is not None and default != current: + raise UsageError( + "Current task already created " + "and requested {field} '{default}' does not match current {field} '{current}'".format( + field=field, + default=default, + current=current, + ) + ) + + if cls.__main_task is not None: + if not running_remotely(): + verify_defaults_match() + + return cls.__main_task + + # check that we are not a child process, in that case do nothing + if PROC_MASTER_ID_ENV_VAR.get() and PROC_MASTER_ID_ENV_VAR.get() != os.getpid(): + class _TaskStub(object): + def __call__(self, *args, **kwargs): + return self + + def __getattr__(self, attr): + return self + + def __setattr__(self, attr, val): + pass + + return _TaskStub() + # set us as master process + PROC_MASTER_ID_ENV_VAR.set(os.getpid()) + + if task_type is None: + # Backwards compatibility: if called from Task.current_task and task_type + # was not specified, keep legacy default value of TaskTypes.training + __from_current_task = kwargs.pop("__from_current_task", False) + if __from_current_task: + task_type = cls.TaskTypes.training + + try: + if not running_remotely(): + task = cls._create_dev_task( + project_name, + task_name, + task_type, + reuse_last_task_id, + ) + if output_uri: + task.output_uri = output_uri + else: + task = cls( + private=cls.__create_protection, + task_id=get_remote_task_id(), + log_to_backend=False, + ) + except Exception: + raise + else: + Task.__main_task = task + # Patch argparse to be aware of the current task + argparser_update_currenttask(Task.__main_task) + if auto_connect_frameworks: + PatchedMatplotlib.update_current_task(Task.__main_task) + PatchAbsl.update_current_task(Task.__main_task) + PatchSummaryToEventTransformer.update_current_task(task) + # PatchModelCheckPointCallback.update_current_task(task) + PatchTensorFlowEager.update_current_task(task) + PatchKerasModelIO.update_current_task(task) + PatchTensorflowModelIO.update_current_task(task) + PatchPyTorchModelIO.update_current_task(task) + # Check if parse args already called. If so, sync task parameters with parser + if argparser_parseargs_called(): + parser, parsed_args = get_argparser_last_args() + task._connect_argparse(parser=parser, parsed_args=parsed_args) + + # make sure all random generators are initialized with new seed + make_deterministic(task.get_random_seed()) + + if auto_connect_arg_parser: + # Patch ArgParser to be aware of the current task + argparser_update_currenttask(Task.__main_task) + # Check if parse args already called. If so, sync task parameters with parser + if argparser_parseargs_called(): + parser, parsed_args = get_argparser_last_args() + task._connect_argparse(parser, parsed_args=parsed_args) + + # Make sure we start the logger, it will patch the main logging object and pipe all output + # if we are running locally and using development mode worker, we will pipe all stdout to logger. 
+ # The logger will automatically take care of all patching (we just need to make sure to initialize it) + task.get_logger() + + # Make sure we start the dev worker if required, otherwise it will only be started when we write + # something to the log. + task._dev_mode_task_start() + + return task + + @classmethod + def create( + cls, + task_name=None, + project_name=None, + task_type=TaskTypes.training, + ): + """ + Create a new Task object, regardless of the main execution task (Task.init). + + Notice: This function will always create a new task, whether running in development or remote execution mode. + + :param task_name: task name to be created + :param project_name: Project to create the task in. + If project is None, and the main execution task is initialized (Task.init), its project will be used. + If project is provided but doesn't exist, it will be created. + :param task_type: Task type to be created. (default: "training") + Optional Task types are: "training" / "testing" / "dataset_import" / "annotation" / "annotation_manual" + :return: Task() object + """ + if not project_name: + if not cls.__main_task: + raise ValueError("Please provide project_name, no global task context found " + "(Task.current_task hasn't been called)") + project_name = cls.__main_task.get_project_name() + + try: + task = cls( + private=cls.__create_protection, + project_name=project_name, + task_name=task_name, + task_type=task_type, + log_to_backend=False, + force_create=True, + ) + except Exception: + raise + return task + + @classmethod + def _reset_current_task_obj(cls): + if not cls.__main_task: + return + task = cls.__main_task + cls.__main_task = None + if task._dev_worker: + task._dev_worker.unregister() + task._dev_worker = None + if task._dev_stop_signal: + task._dev_stop_signal = None + + @classmethod + def _create_dev_task(cls, default_project_name, default_task_name, default_task_type, reuse_last_task_id): + if not default_project_name or not default_task_name: + # get project name and task name from repository name and entry_point + result = ScriptInfo.get() + if result: + if not default_project_name: + # noinspection PyBroadException + try: + parts = result.script['repository'].split('/') + default_project_name = (parts[-1] or parts[-2]).replace('.git', '') or 'Untitled' + except Exception: + default_project_name = 'Untitled' + if not default_task_name: + # noinspection PyBroadException + try: + default_task_name = os.path.splitext(os.path.basename(result.script['entry_point']))[0] + except Exception: + pass + + # if we have a previous session to use, get the task id from it + default_task = cls.__get_last_used_task_id( + default_project_name, + default_task_name, + default_task_type.value, + ) + + closed_old_task = False + default_task_id = None + in_dev_mode = not running_remotely() and not DevWorker.is_enabled() + + if in_dev_mode: + if not reuse_last_task_id or not cls.__task_is_relevant(default_task): + default_task_id = None + closed_old_task = cls.__close_timed_out_task(default_task) + else: + default_task_id = default_task.get('id') if default_task else None + + if default_task_id: + try: + task = cls( + private=cls.__create_protection, + task_id=default_task_id, + log_to_backend=True, + ) + if ((task.status in (TaskStatusEnum.published, TaskStatusEnum.closed)) + or (ARCHIVED_TAG in task.data.tags) or task.output_model_id): + # If the task is published or closed, we shouldn't reset it so we can't use it in dev mode + # If the task is archived, or already has an output model, + # we 
shouldn't use it in development mode either + default_task_id = None + task = None + else: + # reset the task, so we can update it + task.reset(set_started_on_success=False, force=False) + # clear task parameters, they are not cleared by the Task reset + task.set_parameters({}, __update=False) + # clear the comment, it is not cleared on reset + task.set_comment(make_message('Auto-generated at %(time)s by %(user)s@%(host)s')) + # clear the input model (and task model design/labels) + task.set_input_model(model_id='', update_task_design=False, update_task_labels=False) + task.set_model_config(config_text='') + task.set_model_label_enumeration({}) + + except (Exception, ValueError): + # we failed reusing task, create a new one + default_task_id = None + + # create a new task + if not default_task_id: + task = cls( + private=cls.__create_protection, + project_name=default_project_name, + task_name=default_task_name, + task_type=default_task_type, + log_to_backend=True, + ) + + if in_dev_mode: + # update this session, for later use + cls.__update_last_used_task_id(default_project_name, default_task_name, default_task_type.value, task.id) + + # force update of base logger to this current task (this is the main logger task) + task._setup_log(replace_existing=True) + logger = task.get_logger() + if closed_old_task: + logger.console('TRAINS Task: Closing old development task id={}'.format(default_task.get('id'))) + # print warning, reusing/creating a task + if default_task_id: + logger.console('TRAINS Task: overwriting (reusing) task id=%s' % task.id) + else: + logger.console('TRAINS Task: created new task id=%s' % task.id) + + # update current repository and put warning into logs + if in_dev_mode and cls.__detect_repo_async: + start_new_thread(task._update_repository, tuple()) + else: + task._update_repository() + + # show the debug metrics page in the log, it is very convenient + logger.console( + 'TRAINS results page: {}/projects/{}/experiments/{}/output/log'.format( + task._get_app_server(), + task.project if task.project is not None else '*', + task.id, + ), + ) + # make sure we see something in the UI + threading.Thread(target=LoggerRoot.flush).start() + return task + + @staticmethod + def get_task(task_id=None, project_name=None, task_name=None): + """ + Returns a Task object based on either task_id (system uuid) or task name + + :param task_id: unique task id string (if provided, other parameters are ignored) + :param project_name: project name (str) the task belongs to + :param task_name: task name (str) within the selected project + :return: Task object + """ + return Task.__get_task(task_id=task_id, project_name=project_name, task_name=task_name) + + @property + def output_uri(self): + return self.storage_uri + + @output_uri.setter + def output_uri(self, value): + self.storage_uri = value + + def set_comment(self, comment): + """ + Set a comment text for the task. + + When running remotely, this is a no-op. + + :param comment: The comment of the task + :type comment: str + """ + if not running_remotely() or not self.is_main_task(): + self._edit(comment=comment) + self.reload() + + def add_tags(self, tags): + """ + Add tags to this task. Old tags are not deleted. + + When running remotely, this is a no-op. + + :param tags: An iterable or space separated string of new tags (string) to add.
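+         For example (illustrative): task.add_tags(['training', 'baseline']) or, equivalently, task.add_tags('training baseline')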
+ :type tags: str or iterable of str + """ + + if not running_remotely() or not self.is_main_task(): + if isinstance(tags, six.string_types): + tags = tags.split(" ") + + self.data.tags.extend(tags) + self._edit(tags=list(set(self.data.tags))) + + def connect(self, mutable): + """ + Connect an object to a task (see introduction to Task connect design) + + :param mutable: can be any object Task supports integrating with: + - argparse : for argument passing + - dict : for argument passing + - TaskParameters : for argument passing + - model : for initial model warmup or model update/snapshot uploads + :return: connect_task() return value if supported + :raise: raises an exception on unsupported objects + """ + + dispatch = OrderedDict(( + (OutputModel, self._connect_output_model), + (InputModel, self._connect_input_model), + (ArgumentParser, self._connect_argparse), + (dict, self._connect_dictionary), + (TaskParameters, self._connect_task_parameters), + )) + + for mutable_type, method in dispatch.items(): + if isinstance(mutable, mutable_type): + return method(mutable) + + raise Exception('Unsupported mutable type %s: no connect function found' % type(mutable).__name__) + + def get_logger(self, flush_period=NotSet): + """ + Get a logger object for reporting, based on the task + + :param flush_period: The period of the logger flush. + If None or any other False value, the logger will not flush periodically. + If a logger was created before, this will be the new period and + the old one will be discarded. + + :return: .Logger object + """ + if not self._logger: + # force update of base logger to this current task (this is the main logger task) + self._setup_log(replace_existing=self.is_main_task()) + # Get a logger object + self._logger = Logger(private_task=self) + # make sure we set our reporter to async mode + # we make sure we flush it in self._at_exit + self.reporter.async_enable = True + # if we just created the logger, set default flush period + if not flush_period or flush_period is NotSet: + flush_period = DevWorker.report_period + + if isinstance(flush_period, (int, float)): + flush_period = int(abs(flush_period)) + + if flush_period is None or isinstance(flush_period, int): + self._logger.set_flush_period(flush_period) + + return self._logger + + def mark_started(self): + """ + Manually mark the task as started (this normally happens automatically) + """ + # UI won't let us see metrics if we're not started + self.started() + self.reload() + + def mark_stopped(self): + """ + Manually mark the task as stopped (also used in self._at_exit) + """ + # flush any outstanding logs + self.flush(wait_for_uploads=True) + # mark task as stopped + self.stopped() + + def flush(self, wait_for_uploads=False): + """ + Flush any outstanding reports or console logs + + :param wait_for_uploads: if True, the flush will return only after all outstanding uploads are completed + :return: True + """ + self._dev_mode_periodic() + + # make sure model upload is done + if BackendModel.get_num_results() > 0 and wait_for_uploads: + BackendModel.wait_for_results() + + # flush any outstanding logs + if self._logger: + # noinspection PyProtectedMember + self._logger._flush_stdout_handler() + self.reporter.flush() + LoggerRoot.flush() + + return True + + def reset(self, set_started_on_success=False, force=False): + """ + Reset the task. Task will be reloaded following a successful reset.
+ + Notice: when running remotely the task will not be reset (as it will clear all logs and metrics) + + :param set_started_on_success: automatically set started if reset was successful + :param force: force task reset even if running remotely + """ + if not running_remotely() or not self.is_main_task() or force: + super(Task, self).reset(set_started_on_success=set_started_on_success) + + def close(self): + """ + Close the current Task. Enables to manually shutdown the task. + Should only be called if you are absolutely sure there is no need for the Task. + """ + self._at_exit() + + def is_current_task(self): + """ + Check if this task is the main task (returned by Task.init()) + + NOTE: This call is deprecated. Please use Task.is_main_task() + + If Task.init() was never called, this method will *not* create + it, making this test cheaper than Task.init() == task + + :return: True if this task is the current task + """ + return self.is_main_task() + + def is_main_task(self): + """ + Check if this task is the main task (returned by Task.init()) + + If Task.init() was never called, this method will *not* create + it, making this test cheaper than Task.init() == task + + :return: True if this task is the current task + """ + return self is self.__main_task + + def set_model_config(self, config_text=None, config_dict=None): + """ + Set Task model configuration text/dict (before creating an output model) + When an output model is created it will inherit these properties + + :param config_text: model configuration (unconstrained text string). usually the content of a configuration file. + If `config_text` is not None, `config_dict` must not be provided. + :param config_dict: model configuration parameters dictionary. + If `config_dict` is not None, `config_text` must not be provided. + """ + design = OutputModel._resolve_config(config_text=config_text, config_dict=config_dict) + super(Task, self)._set_model_design(design=design) + + def get_model_config_text(self): + """ + Get Task model configuration text (before creating an output model) + When an output model is created it will inherit these properties + + :return model config_text (unconstrained text string). usually the content of a configuration file. + If `config_text` is not None, `config_dict` must not be provided. + """ + return super(Task, self).get_model_design() + + def get_model_config_dict(self): + """ + Get Task model configuration dictionary (before creating an output model) + When an output model is created it will inherit these properties + + :return model config_text (unconstrained text string). usually the content of a configuration file. + If `config_text` is not None, `config_dict` must not be provided. + """ + config_text = self.get_model_config_text() + return OutputModel._text_to_config_dict(config_text) + + def set_model_label_enumeration(self, enumeration=None): + """ + Set Task output label enumeration (before creating an output model) + When an output model is created it will inherit these properties + + :param enumeration: dictionary of string to integer, enumerating the model output to labels + example: {'background': 0 , 'person': 1} + """ + super(Task, self).set_model_label_enumeration(enumeration=enumeration) + + def _connect_output_model(self, model): + assert isinstance(model, OutputModel) + model.connect(self) + + def _save_output_model(self, model): + """ + Save a reference to the connected output model. 
+ + :param model: The connected output model + """ + self._connected_output_model = model + + def _reconnect_output_model(self): + """ + If there is a saved connected output model, connect it again. + + This is needed if the input model is connected after the output model + is connected, an then we will have to get the model design from the + input model by reconnecting. + """ + if self._connected_output_model: + self.connect(self._connected_output_model) + + def _connect_input_model(self, model): + assert isinstance(model, InputModel) + # we only allow for an input model to be connected once + # at least until we support multiple input models + # notice that we do not check the task's input model because we allow task reuse and overwrite + # add into comment that we are using this model + comment = self.comment or '' + if not comment.endswith('\n'): + comment += '\n' + comment += 'Using model id: {}'.format(model.id) + self.set_comment(comment) + if self._last_input_model_id and self._last_input_model_id != model.id: + self.log.warning('Task connect, second input model is not supported, adding into comment section') + return + self._last_input_model_id = model.id + model.connect(self) + + def _try_set_connected_parameter_type(self, option): + # """ Raise an error if current value is not None and not equal to the provided option value """ + # value = self._connected_parameter_type + # if not value or value == option: + # self._connected_parameter_type = option + # return option + # + # def title(option): + # return " ".join(map(str.capitalize, option.split("_"))) + # + # raise ValueError( + # "Task already connected to {}. " + # "Task can be connected to only one the following argument options: {}".format( + # title(value), + # ' / '.join(map(title, self._ConnectedParametersType._options()))) + # ) + + # added support for multiple type connections through _Arguments + return option + + def _connect_argparse(self, parser, args=None, namespace=None, parsed_args=None): + # do not allow argparser to connect to jupyter notebook + # noinspection PyBroadException + try: + if 'IPython' in sys.modules: + from IPython import get_ipython + ip = get_ipython() + if ip is not None and 'IPKernelApp' in ip.config: + return + except Exception: + pass + + self._try_set_connected_parameter_type(self._ConnectedParametersType.argparse) + + if self.is_main_task(): + argparser_update_currenttask(self) + + if (parser is None or parsed_args is None) and argparser_parseargs_called(): + _parser, _parsed_args = get_argparser_last_args() + if parser is None: + parser = _parser + if parsed_args is None and parser == _parser: + parsed_args = _parsed_args + + if running_remotely() and self.is_main_task(): + # This hack prevents Argparse from crashing when running remotely with different set of parameters + sys.argv = sys.argv[:1] + self._arguments.copy_to_parser(parser, parsed_args) + else: + self._arguments.copy_defaults_from_argparse(parser, args=args, namespace=namespace, parsed_args=parsed_args) + + def _connect_dictionary(self, dictionary): + self._try_set_connected_parameter_type(self._ConnectedParametersType.dictionary) + + if running_remotely() and self.is_main_task(): + dictionary = self._arguments.copy_to_dict(dictionary) + else: + dictionary = self._arguments.copy_from_dict(dictionary) + + return dictionary + + def _connect_task_parameters(self, attr_class): + self._try_set_connected_parameter_type(self._ConnectedParametersType.task_parameters) + + if running_remotely() and self.is_main_task(): + 
attr_class.update_from_dict(self.get_parameters()) + else: + self.set_parameters(attr_class.to_dict()) + + def _validate(self, check_output_dest_credentials=False): + if running_remotely(): + super(Task, self)._validate(check_output_dest_credentials=False) + + def _output_model_updated(self): + """ Called when a connected output model is updated """ + if running_remotely() or not self.is_main_task(): + return + + # Make sure we know we've started, just in case we didn't so far + self._dev_mode_task_start(model_updated=True) + + # Store uncommitted code changes + self._store_uncommitted_code_changes() + + def _store_uncommitted_code_changes(self): + if running_remotely() or not self.is_main_task(): + return + + if not self.__store_diff_on_train: + # Feature turned off + return + + # # ToDo: Add support for back-end, currently doing nothing + # script = self.data.script + # if script and script.requirements: + # # We already have predefined requirements + # return + # + # script = ScriptInfo.get(check_uncommitted=True).script or {} + # freeze = pip_freeze() + # + # requirements = { + # "diff": script.get("diff", ""), + # "pip": freeze + # } + # + # self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements)) + # self.reload() + return + + def _dev_mode_task_start(self, model_updated=False): + """ Called when we suspect the task has started running """ + self._dev_mode_setup_worker(model_updated=model_updated) + + if TaskStopSignal.enabled and not self._dev_stop_signal: + self._dev_stop_signal = TaskStopSignal(task=self) + + def _dev_mode_stop_task(self, stop_reason): + self.get_logger().warn( + "### TASK STOPPED - USER ABORTED - {} ###".format( + stop_reason.upper().replace('_', ' ') + ) + ) + self.flush(wait_for_uploads=True) + self.stopped() + + if self._dev_worker: + self._dev_worker.unregister() + + # NOTICE! This will end the entire execution tree! 
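+         # terminate child processes first, give them two seconds to exit, then force-kill any stragglers and exit this process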
+ self._kill_all_child_processes(send_kill=False) + time.sleep(2.0) + self._kill_all_child_processes(send_kill=True) + # noinspection PyProtectedMember + os._exit(1) + + @staticmethod + def _kill_all_child_processes(send_kill=False): + # get current process if pid not provided + include_parent = True + pid = os.getpid() + try: + parent = psutil.Process(pid) + except psutil.Error: + # could not find parent process id + return + for child in parent.children(recursive=True): + if send_kill: + child.kill() + else: + child.terminate() + # kill ourselves + if send_kill: + parent.kill() + else: + parent.terminate() + + def _dev_mode_periodic(self): + if self._dev_mode_periodic_flag or not self.is_main_task(): + # Ensures we won't get into an infinite recursion since we might call self.flush() down the line + return + self._dev_mode_periodic_flag = True + + try: + if self._dev_stop_signal: + stop_reason = self._dev_stop_signal.test() + if stop_reason and not self._at_exit_called: + self._dev_mode_stop_task(stop_reason) + + if self._dev_worker: + self._dev_worker.status_report() + finally: + self._dev_mode_periodic_flag = False + + def _dev_mode_setup_worker(self, model_updated=False): + if running_remotely() or not self.is_main_task(): + return + + if self._dev_worker: + return self._dev_worker + + if not DevWorker.is_enabled(model_updated): + return None + + self._dev_worker = DevWorker() + self._dev_worker.register() + + logger = self.get_logger() + flush_period = logger.get_flush_period() + if not flush_period or flush_period > self._dev_worker.report_period: + logger.set_flush_period(self._dev_worker.report_period) + + # Remove 'development' tag + tags = self.get_tags() + try: + tags.remove('development') + except ValueError: + pass + else: + self.set_tags(tags) + + def _at_exit(self): + """ + Will happen automatically once we exit code, i.e. 
atexit + :return: + """ + if self._at_exit_called: + return + + # noinspection PyBroadException + try: + # from here do not get into watch dog + self._at_exit_called = True + self._dev_stop_signal = None + self._dev_mode_periodic_flag = True + wait_for_uploads = True + # first thing mark task as stopped, so we will not end up with "running" on lost tasks + # if we are running remotely, the daemon will take care of it + if not running_remotely() and self.is_main_task(): + # from here, do not check worker status + if self._dev_worker: + self._dev_worker.unregister() + # check if we crashed, ot the signal is not interrupt (manual break) + if self.__exit_hook: + if self.__exit_hook.exception is not None or self.__exit_hook.signal not in (None, 2): + self.mark_failed(status_reason='Exception') + wait_for_uploads = False + else: + self.stopped() + wait_for_uploads = (self.__exit_hook.signal is None) + else: + self.stopped() + + # wait for uploads + print_done_waiting = False + if wait_for_uploads and (BackendModel.get_num_results() > 0 or self.reporter.get_num_results() > 0): + self.log.info('Waiting to finish uploads') + print_done_waiting = True + # from here, do not send log in background thread + self._logger.set_flush_period(None) + if wait_for_uploads: + self.flush(wait_for_uploads=True) + if print_done_waiting: + self.log.info('Finished uploading') + else: + self._logger._flush_stdout_handler() + # this is so in theory we can close a main task and start a new one + Task.__main_task = None + except Exception: + # make sure we do not interrupt the exit process + pass + + @classmethod + def __register_at_exit(cls, exit_callback): + class ExitHooks(object): + _orig_exit = None + _orig_exc_handler = None + + def __init__(self, callback): + self.exit_code = None + self.exception = None + self.signal = None + self._exit_callback = callback + self._org_handlers = {} + + def update_callback(self, callback): + if self._exit_callback: + atexit.unregister(self._exit_callback) + self._exit_callback = callback + atexit.register(self._exit_callback) + + def hook(self): + if self._orig_exit is None: + self._orig_exit = sys.exit + sys.exit = self.exit + if self._orig_exc_handler is None: + self._orig_exc_handler = sys.excepthook + sys.excepthook = self.exc_handler + atexit.register(self._exit_callback) + import signal + catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT, + signal.SIGILL, signal.SIGFPE, signal.SIGQUIT] + for s in catch_signals: + # noinspection PyBroadException + try: + self._org_handlers[s] = signal.getsignal(s) + signal.signal(s, self.signal_handler) + except Exception: + pass + + def exit(self, code=0): + self.exit_code = code + self._orig_exit(code) + + def exc_handler(self, exctype, value, traceback, *args, **kwargs): + self.exception = value + return self._orig_exc_handler(exctype, value, traceback, *args, **kwargs) + + def signal_handler(self, sig, frame): + self.signal = sig + if self._exit_callback: + self._exit_callback() + org_handler = self._org_handlers[sig] + if isinstance(org_handler, Callable): + return org_handler(sig, frame) + return org_handler + + if cls.__exit_hook is None: + # noinspection PyBroadException + try: + cls.__exit_hook = ExitHooks(exit_callback) + cls.__exit_hook.hook() + except Exception: + cls.__exit_hook = None + elif cls.__main_task is None: + cls.__exit_hook.update_callback(exit_callback) + + @classmethod + def __get_task(cls, task_id=None, project_name=None, task_name=None): + if task_id: + return 
cls(private=cls.__create_protection, task_id=task_id) + + res = cls._send( + cls._get_default_session(), + projects.GetAllRequest( + name=exact_match_regex(project_name) + ) + ) + project = get_single_result(entity='project', query=project_name, results=res.response.projects) + + res = cls._send( + cls._get_default_session(), + tasks.GetAllRequest( + project=[project.id], + name=exact_match_regex(task_name), + only_fields=['id', 'name'] + ) + ) + task = get_single_result(entity='task', query=task_name, results=res.response.tasks) + + return cls( + private=cls.__create_protection, + task_id=task.id, + log_to_backend=False, + ) + + @classmethod + def __get_hash_key(cls, *args): + def normalize(x): + return "<{}>".format(x) if x is not None else "" + + return ":".join(map(normalize, args)) + + @classmethod + def __get_last_used_task_id(cls, default_project_name, default_task_name, default_task_type): + hash_key = cls.__get_hash_key(default_project_name, default_task_name, default_task_type) + + # check if we have a cached task_id we can reuse + # it must be from within the last 24h and with the same project/name/type + task_sessions = SessionCache.load_dict(str(cls)) + + task_data = task_sessions.get(hash_key) + if task_data is None: + return None + + try: + task_data['type'] = cls.TaskTypes(task_data['type']) + except (ValueError, KeyError): + LoggerRoot.get_base_logger().warning( + "Corrupted session cache entry: {}. " + "Unsupported task type: {}" + "Creating a new task.".format(hash_key, task_data['type']), + ) + + return None + + return task_data + + @classmethod + def __update_last_used_task_id(cls, default_project_name, default_task_name, default_task_type, task_id): + hash_key = cls.__get_hash_key(default_project_name, default_task_name, default_task_type) + + task_id = str(task_id) + # update task session cache + task_sessions = SessionCache.load_dict(str(cls)) + last_task_session = {'time': time.time(), 'project': default_project_name, 'name': default_task_name, + 'type': default_task_type, 'id': task_id} + + # remove stale sessions + for k in list(task_sessions.keys()): + if ((time.time() - task_sessions[k].get('time', 0)) > + 60 * 60 * cls.__task_id_reuse_time_window_in_hours): + task_sessions.pop(k) + # update current session + task_sessions[hash_key] = last_task_session + # store + SessionCache.store_dict(str(cls), task_sessions) + + @classmethod + def __task_timed_out(cls, task_data): + return \ + task_data and \ + task_data.get('id') and \ + task_data.get('time') and \ + (time.time() - task_data.get('time')) > (60 * 60 * cls.__task_id_reuse_time_window_in_hours) + + @classmethod + def __get_task_api_obj(cls, task_id, only_fields=None): + if not task_id: + return None + + all_tasks = cls._send( + cls._get_default_session(), + tasks.GetAllRequest(id=[task_id], only_fields=only_fields), + ).response.tasks + + # The task may not exist in environment changes + if not all_tasks: + return None + + return all_tasks[0] + + @classmethod + def __task_is_relevant(cls, task_data): + """ + Check that a cached task is relevant for reuse. + + A task is relevant for reuse if: + 1. It is not timed out i.e it was last use in the previous 24 hours. + 2. It's name, project and type match the data in the server, so not + to override user changes made by using the UI. + + :param task_data: A mapping from 'id', 'name', 'project', 'type' keys + to the task's values, as saved in the cache. + + :return: True if the task is relevant for reuse, False if not. 
+ """ + if not task_data: + return False + + # in dev-worker mode, never reuse a task + if DevWorker.is_enabled(): + return False + + if cls.__task_timed_out(task_data): + return False + + task_id = task_data.get('id') + + if not task_id: + return False + + task = cls.__get_task_api_obj(task_id, ('id', 'name', 'project', 'type')) + + if task is None: + return False + + project_name = None + if task.project: + project = cls._send( + cls._get_default_session(), + projects.GetByIdRequest(project=task.project) + ).response.project + + if project: + project_name = project.name + + compares = ( + (task.name, 'name'), + (project_name, 'project'), + (task.type, 'type'), + ) + + return all(server_data == task_data.get(task_data_key) + for server_data, task_data_key in compares) + + @classmethod + def __close_timed_out_task(cls, task_data): + if not task_data: + return False + + task = cls.__get_task_api_obj(task_data.get('id'), ('id', 'status')) + + if task is None: + return False + + stopped_statuses = ( + tasks.TaskStatusEnum.stopped, + tasks.TaskStatusEnum.published, + tasks.TaskStatusEnum.publishing, + tasks.TaskStatusEnum.closed, + tasks.TaskStatusEnum.failed, + ) + + if task.status not in stopped_statuses: + cls._send( + cls._get_default_session(), + tasks.StoppedRequest( + task=task.id, + force=True, + status_message="Stopped timed out development task" + ), + ) + + return True + return False diff --git a/trains/task_parameters.py b/trains/task_parameters.py new file mode 100644 index 00000000..3d9086ab --- /dev/null +++ b/trains/task_parameters.py @@ -0,0 +1,163 @@ +import six +import attr +from attr import validators + + +__all__ = ['range_validator', 'param', 'percent_param', 'TaskParameters'] + + +def _canonize_validator(current_validator): + """ + Convert current_validator to a new list and return it. + + If current_validator is None return an empty list. + If current_validator is a list, return a copy of it. + If current_validator is another type of iterable, return a list version of it. + If current_validator is a single value, return a one-list containing it. + """ + + if not current_validator: + return [] + + if isinstance(current_validator, (list, tuple)): + current_validator = list(current_validator) + else: + current_validator = [current_validator] + + return current_validator + + +def range_validator(min_value, max_value): + """ + A parameter validator that checks range constraint on a parameter. + + :param min_value: The minimum limit of the range, inclusive. None for no minimum limit. + :param max_value: The maximum limit of the range, inclusive. None for no maximum limit. + :return: A new range validator + """ + def _range_validator(instance, attribute, value): + if ((min_value is not None) and (value < min_value)) or \ + ((max_value is not None) and (value > max_value)): + raise ValueError("{} must be in range [{}, {}]".format(attribute.name, min_value, max_value)) + + return _range_validator + + +def param( + validator=None, + range=None, + type=None, + desc=None, + metadata=None, + *args, + **kwargs +): + """ + A parameter inside a TaskParameters class. + + See TaskParameters for more information. + + :param validator: A validator or validators list. + Any validator from attr.validators is applicable. + + :param range: The legal values range of the parameter. + A tuple (min_limit, max_limit). None for no limitation. + + :param type: The type of the parameter. + Supported types are int, str and float. 
None to place no limit of the type + + :param desc: A string description of the parameter, for future use. + + :param metadata: A dictionary metadata of the parameter, for future use. + + :param args: Additional arguments to pass to attr.attrib constructor. + :param kwargs: Additional keyword arguments to pass to attr.attrib constructor. + + :return: An attr.attrib instance to use with TaskParameters class. + + Warning: Do not create an immutable param using args or kwargs. It will cause + connect method of the TaskParameters class to fail. + """ + + metadata = metadata or {} + metadata["desc"] = desc + + validator = _canonize_validator(validator) + + if type: + validator.append(validators.optional(validators.instance_of(type))) + + if range: + validator.append(range_validator(*range)) + + return attr.ib(validator=validator, type=type, metadata=metadata, *args, **kwargs) + + +def percent_param(*args, **kwargs): + """ + A param with type float and range limit (0, 1). + """ + return param(range=(0, 1), type=float, *args, **kwargs) + + +class _AttrsMeta(type): + def __new__(mcs, name, bases, dct): + new_class = super(_AttrsMeta, mcs).__new__(mcs, name, bases, dct) + return attr.s(new_class) + + +@six.add_metaclass(_AttrsMeta) +class TaskParameters(object): + """ + Base class for task parameters. + + Inherit this class to create a parameter set to connect to a task. + + Usage Example: + class MyParams(TaskParameters): + iterations = param( + type=int, + desc="Number of iterations to run", + range=(0, 100000), + ) + + target_accuracy = percent_param( + desc="The target accuracy of the model", + ) + """ + + def to_dict(self): + """ + :return: A new dictionary with keys are the parameters names and values + are the corresponding values. + """ + return attr.asdict(self) + + def update_from_dict(self, source_dict): + """ + Update the parameters using values from a dictionary. + + :param source_dict: A dictionary with an entry for each parameter to + update. + """ + for key, value in source_dict.items(): + if not hasattr(self, key): + raise ValueError("Unknown key {} in {} object".format(key, type(self).__name__)) + + setattr(self, key, value) + + def connect(self, task): + """ + Connect to a task. + + When running locally, the task will save the parameters from self. + When running with a worker, self will be updated according to the task's + saved parameters. + + :param task: The task to connect to. 
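+         For example (illustrative, reusing the MyParams sketch above): MyParams(iterations=1000, target_accuracy=0.95).connect(task)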
+ :type task: .Task + """ + + return task.connect(self) + + diff --git a/trains/utilities/__init__.py b/trains/utilities/__init__.py new file mode 100644 index 00000000..e552b90e --- /dev/null +++ b/trains/utilities/__init__.py @@ -0,0 +1 @@ +from .dicts import Logs diff --git a/trains/utilities/absl_bind.py b/trains/utilities/absl_bind.py new file mode 100644 index 00000000..48ef1337 --- /dev/null +++ b/trains/utilities/absl_bind.py @@ -0,0 +1,88 @@ +""" absl-py FLAGS binding utility functions """ +from trains.backend_interface.task.args import _Arguments +from ..config import running_remotely + + +class PatchAbsl(object): + _original_DEFINE_flag = None + _task = None + + @classmethod + def update_current_task(cls, current_task): + cls._task = current_task + cls._patch_absl() + + @classmethod + def _patch_absl(cls): + if cls._original_DEFINE_flag: + return + # noinspection PyBroadException + try: + from absl.flags import _defines + cls._original_DEFINE_flag = _defines.DEFINE_flag + _defines.DEFINE_flag = cls._patched_define_flag + # if absl was already set, let's update our task params + from absl import flags + cls._update_current_flags(flags.FLAGS) + except Exception: + # there is no absl + pass + + @staticmethod + def _patched_define_flag(*args, **kwargs): + if not PatchAbsl._task or not PatchAbsl._original_DEFINE_flag: + if PatchAbsl._original_DEFINE_flag: + return PatchAbsl._original_DEFINE_flag(*args, **kwargs) + else: + return None + # noinspection PyBroadException + try: + flag = args[0] if len(args) >= 1 else None + module_name = args[2] if len(args) >= 3 else None + param_name = None + if flag: + param_name = ((module_name + _Arguments._prefix_sep) if module_name else '') + flag.name + except Exception: + flag = None + param_name = None + + if running_remotely(): + # noinspection PyBroadException + try: + if param_name and flag: + param_dict = PatchAbsl._task._arguments.copy_to_dict({param_name: flag.value}, + prefix=_Arguments._prefix_tf_defines) + flag.value = param_dict.get(param_name, flag.value) + except Exception: + pass + ret = PatchAbsl._original_DEFINE_flag(*args, **kwargs) + else: + if flag and param_name: + value = flag.value + PatchAbsl._task.update_parameters({_Arguments._prefix_tf_defines + param_name: value}) + ret = PatchAbsl._original_DEFINE_flag(*args, **kwargs) + return ret + + @classmethod + def _update_current_flags(cls, FLAGS): + if not cls._task: + return + # noinspection PyBroadException + try: + if running_remotely(): + param_dict = cls._task._arguments.copy_to_dict({}, prefix=_Arguments._prefix_tf_defines) + for k, v in param_dict.items(): + # noinspection PyBroadException + try: + parts = k.split(_Arguments._prefix_sep) + k = parts[0] + if k in FLAGS: + FLAGS[k].value = v + except Exception: + pass + else: + # clear previous parameters + parameters = dict([(k, FLAGS[k].value) for k in FLAGS]) + cls._task._arguments.copy_from_dict(parameters, prefix=_Arguments._prefix_tf_defines) + except Exception: + pass diff --git a/trains/utilities/args.py b/trains/utilities/args.py new file mode 100644 index 00000000..a6b28481 --- /dev/null +++ b/trains/utilities/args.py @@ -0,0 +1,176 @@ +""" Argparse utilities""" +import sys +from six import PY2 +from argparse import ArgumentParser, _SubParsersAction + + +class PatchArgumentParser: + _original_parse_args = None + _original_parse_known_args = None + _original_add_subparsers = None + _add_subparsers_counter = 0 + _current_task = None + _calling_current_task = False + _last_parsed_args = None + _last_arg_parser = 
None + + @staticmethod + def add_subparsers(self, **kwargs): + if 'dest' not in kwargs: + if kwargs.get('title'): + kwargs['dest'] = '/'+kwargs['title'] + else: + PatchArgumentParser._add_subparsers_counter += 1 + kwargs['dest'] = '/subparser%d' % PatchArgumentParser._add_subparsers_counter + return PatchArgumentParser._original_add_subparsers(self, **kwargs) + + @staticmethod + def parse_args(self, args=None, namespace=None): + return PatchArgumentParser._patched_parse_args(PatchArgumentParser._original_parse_args, + self, args=args, namespace=namespace) + + @staticmethod + def parse_known_args(self, args=None, namespace=None): + return PatchArgumentParser._patched_parse_args(PatchArgumentParser._original_parse_known_args, + self, args=args, namespace=namespace) + + @staticmethod + def _patched_parse_args(original_parse_fn, self, args=None, namespace=None): + # if we are running remotely, we always have a task id, so we better patch the argparser as soon as possible. + if not PatchArgumentParser._current_task: + from ..config import running_remotely + if running_remotely(): + # this will cause the current_task() to set PatchArgumentParser._current_task + from trains import Task + # noinspection PyBroadException + try: + Task.init() + except Exception: + pass + # automatically connect to current task: + if PatchArgumentParser._current_task: + from ..config import running_remotely + + if PatchArgumentParser._calling_current_task: + # if we are here and running remotely by now we should try to parse the arguments + if original_parse_fn: + PatchArgumentParser._last_parsed_args = \ + original_parse_fn(self, args=args, namespace=namespace) + return PatchArgumentParser._last_parsed_args + + PatchArgumentParser._calling_current_task = True + # Store last instance and result + PatchArgumentParser._last_arg_parser = self + parsed_args = None + # parse if we are running in dev mode + if not running_remotely() and original_parse_fn: + parsed_args = original_parse_fn(self, args=args, namespace=namespace) + PatchArgumentParser._last_parsed_args = parsed_args + # noinspection PyBroadException + try: + # sync to/from task + PatchArgumentParser._current_task._connect_argparse(self, args=args, namespace=namespace, + parsed_args=parsed_args[0] + if isinstance(parsed_args, tuple) else parsed_args) + except Exception: + pass + # sync back and parse + if running_remotely() and original_parse_fn: + # if we are running python2 check if we have subparsers, + # if we do we need to patch the args, because there is no default subparser + if PY2: + import itertools + def _get_sub_parsers_defaults(subparser, prev=[]): + actions_grp = [a._actions for a in subparser.choices.values()] if isinstance(subparser, _SubParsersAction) else \ + [subparser._actions] + sub_parsers_defaults = [[subparser]] if hasattr(subparser, 'default') and subparser.default else [] + for actions in actions_grp: + sub_parsers_defaults += [_get_sub_parsers_defaults(a, prev) + for a in actions if isinstance(a, _SubParsersAction) and + hasattr(a, 'default') and a.default] + + return list(itertools.chain.from_iterable(sub_parsers_defaults)) + sub_parsers_defaults = _get_sub_parsers_defaults(self) + if sub_parsers_defaults: + if args is None: + # args default to the system args + import sys as _sys + args = _sys.argv[1:] + else: + args = list(args) + # make sure we append the subparsers + for a in sub_parsers_defaults: + if a.default not in args: + args.append(a.default) + + PatchArgumentParser._last_parsed_args = original_parse_fn(self, args=args, 
namespace=namespace) + else: + PatchArgumentParser._last_parsed_args = parsed_args or {} + + PatchArgumentParser._calling_current_task = False + return PatchArgumentParser._last_parsed_args + + # Store last instance and result + PatchArgumentParser._last_arg_parser = self + PatchArgumentParser._last_parsed_args = {} if not original_parse_fn else \ + original_parse_fn(self, args=args, namespace=namespace) + return PatchArgumentParser._last_parsed_args + + +def patch_argparse(): + # make sure we only patch once + if not sys.modules.get('argparse') or hasattr(sys.modules['argparse'].ArgumentParser, '_parse_args_patched'): + return + # mark patched argparse + sys.modules['argparse'].ArgumentParser._parse_args_patched = True + # patch argparser + PatchArgumentParser._original_parse_args = sys.modules['argparse'].ArgumentParser.parse_args + PatchArgumentParser._original_parse_known_args = sys.modules['argparse'].ArgumentParser.parse_known_args + PatchArgumentParser._original_add_subparsers = sys.modules['argparse'].ArgumentParser.add_subparsers + sys.modules['argparse'].ArgumentParser.parse_args = PatchArgumentParser.parse_args + sys.modules['argparse'].ArgumentParser.parse_known_args = PatchArgumentParser.parse_known_args + sys.modules['argparse'].ArgumentParser.add_subparsers = PatchArgumentParser.add_subparsers + + +# Notice! we are patching argparser, sop we know if someone parsed arguments before connecting to task +patch_argparse() + + +def call_original_argparser(self, args=None, namespace=None): + if PatchArgumentParser._original_parse_args: + return PatchArgumentParser._original_parse_args(self, args=args, namespace=namespace) + + +def argparser_parseargs_called(): + return PatchArgumentParser._last_arg_parser is not None + + +def argparser_update_currenttask(task): + PatchArgumentParser._current_task = task + + +def get_argparser_last_args(): + return (PatchArgumentParser._last_arg_parser, + PatchArgumentParser._last_parsed_args[0] if isinstance(PatchArgumentParser._last_parsed_args, tuple) else + PatchArgumentParser._last_parsed_args) + + +def add_params_to_parser(parser, params): + assert isinstance(parser, ArgumentParser) + assert isinstance(params, dict) + + def get_type_details(v): + for t in (int, float, str): + try: + value = t(v) + return t, value + except ValueError: + continue + + # AJB temporary protection from ui problems sending empty dicts + params.pop('', None) + + for param, value in params.items(): + type, type_value = get_type_details(value) + parser.add_argument('--%s' % param, type=type, default=type_value) + return parser diff --git a/trains/utilities/async_manager.py b/trains/utilities/async_manager.py new file mode 100644 index 00000000..cd0efdc1 --- /dev/null +++ b/trains/utilities/async_manager.py @@ -0,0 +1,53 @@ +from threading import Lock +import time + + +class AsyncManagerMixin(object): + _async_results_lock = Lock() + _async_results = [] + + @classmethod + def _add_async_result(cls, result, wait_on_max_results=None, wait_time=30, wait_cb=None): + while True: + try: + cls._async_results_lock.acquire() + # discard completed results + cls._async_results = [r for r in cls._async_results if not r.ready()] + num_results = len(cls._async_results) + if wait_on_max_results is not None and num_results >= wait_on_max_results: + # At least max_results results are still pending, wait + if wait_cb: + wait_cb(num_results) + if wait_time: + time.sleep(wait_time) + continue + # add result + if result and not result.ready(): + cls._async_results.append(result) + break + 
finally: + cls._async_results_lock.release() + + @classmethod + def wait_for_results(cls, timeout=None, max_num_uploads=None): + remaining = timeout + count = 0 + for r in cls._async_results: + if r.ready(): + continue + t = time.time() + r.wait(timeout=remaining) + count += 1 + if max_num_uploads is not None and max_num_uploads - count <= 0: + break + if timeout is not None: + remaining = max(0, remaining - max(0, time.time() - t)) + if not remaining: + break + + @classmethod + def get_num_results(cls): + if cls._async_results is not None: + return len([r for r in cls._async_results if not r.ready()]) + else: + return 0 diff --git a/trains/utilities/config.py b/trains/utilities/config.py new file mode 100644 index 00000000..71ad1ba8 --- /dev/null +++ b/trains/utilities/config.py @@ -0,0 +1,46 @@ +from __future__ import division + +import six +import humanfriendly + + +def parse_human_size(value): + if isinstance(value, six.string_types): + return humanfriendly.parse_size(value) + return value + + +def get_percentage(config, key, required=True, default=None): + if required: + value = config.get(key) + else: + value = config.get(key, default) + if value is None: + return + try: + if isinstance(value, six.string_types): + value = value.strip() + if value.endswith('%'): + # "50%" => 0.5 + return float(value.strip('%')) / 100. + # "50" => 50 + + value = float(value) + if value < 1: + # 0.5 => 50% => 0.5 + return value + + # 50 => 0.5, 10.5 => 0.105 + return value / 100. + + except ValueError as e: + raise ValueError('Config: failed parsing %s: %s' % (key, e)) + + +def get_human_size_default(config, key, default=None): + raw_value = config.get(key, default) + + if raw_value is None: + return default + + return parse_human_size(raw_value) \ No newline at end of file diff --git a/trains/utilities/deferred.py b/trains/utilities/deferred.py new file mode 100644 index 00000000..0a20a9d1 --- /dev/null +++ b/trains/utilities/deferred.py @@ -0,0 +1,121 @@ +import threading +from functools import wraps + +import attr +import six + + +class DeferredExecutionPool(object): + @attr.s + class _DeferredAction(object): + method = attr.ib() + args = attr.ib() + kwargs = attr.ib() + + def __init__(self, instance): + self._instance = instance + self._pool = [] + self._lock = threading.Lock() + + def add(self, callable_, *args, **kwargs): + self._pool.append(self._DeferredAction(callable_, args, kwargs)) + + def clear(self): + with self._lock: + pool = self._pool + self._pool = [] + return pool + + def apply(self): + pool = self.clear() + for action in pool: + action.method(self._instance, *action.args, **action.kwargs) + + def copy_from(self, other): + if not isinstance(self._instance, type(other._instance)): + raise ValueError("Copy deferred actions must be with the same instance type") + + self._pool = other._pool[:] + + +class ParameterizedDefaultDict(dict): + def __init__(self, factory, *args, **kwargs): + super(ParameterizedDefaultDict, self).__init__(*args, **kwargs) + self._factory = factory + + def __missing__(self, key): + self[key] = self._factory(key) + return self[key] + + +class DeferredExecution(object): + def __init__(self, pool_cls=DeferredExecutionPool): + self._pools = ParameterizedDefaultDict(pool_cls) + + def __get__(self, instance, owner): + if not instance: + return self + + return self._pools[instance] + + def defer_execution(self, condition_or_attr_name=True): + """ + Deferred execution decorator, designed to wrap class functions for classes containing a deferred execution pool. 
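+ Example (illustrative only; the attribute and class names here are hypothetical, not part of this module):
+ a class declares '_deferred = DeferredExecution()' as a class attribute and decorates a method with
+ '@_deferred.defer_execution("_offline")'; while 'self._offline' is truthy, calls to that method are queued,
+ and 'self._deferred.apply()' (or a method decorated with 'apply_after') later replays them in order.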
+ :param condition_or_attr_name: Condition controlling whether wrapped function should be deferred. True by default. + If a callable is provided, it will be called with the class instance (self) as first argument. + If a string is provided, a class instance (self) attribute by that name is evaluated. + :return: + """ + def decorator(func): + @wraps(func) + def wrapper(instance, *args, **kwargs): + if self._resolve_condition(instance, condition_or_attr_name): + self._pools[instance].add(func, *args, **kwargs) + else: + return func(instance, *args, **kwargs) + return wrapper + return decorator + + @staticmethod + def _resolve_condition(instance, condition_or_attr_name): + if callable(condition_or_attr_name): + return condition_or_attr_name(instance) + elif isinstance(condition_or_attr_name, six.string_types): + return getattr(instance, condition_or_attr_name) + return condition_or_attr_name + + def _apply(self, instance, condition_or_attr_name): + if self._resolve_condition(instance, condition_or_attr_name): + self._pools[instance].apply() + + def apply_after(self, condition_or_attr_name=True): + """ + Decorator for applying deferred execution pool after wrapped function has completed + :param condition_or_attr_name: Condition controlling whether deferred pool should be applied. True by default. + If a callable is provided, it will be called with the class instance (self) as first argument. + If a string is provided, a class instance (self) attribute by that name is evaluated. + """ + def decorator(func): + @wraps(func) + def wrapper(instance, *args, **kwargs): + res = func(instance, *args, **kwargs) + self._apply(instance, condition_or_attr_name) + return res + return wrapper + return decorator + + def apply_before(self, condition_or_attr_name=True): + """ + Decorator for applying deferred execution pool before wrapped function is executed + :param condition_or_attr_name: Condition controlling whether deferred pool should be applied. True by default. + If a callable is provided, it will be called with the class instance (self) as first argument. + If a string is provided, a class instance (self) attribute by that name is evaluated. + """ + def decorator(func): + @wraps(func) + def wrapper(instance, *args, **kwargs): + self._apply(instance, condition_or_attr_name) + return func(instance, *args, **kwargs) + return wrapper + return decorator + diff --git a/trains/utilities/dicts.py b/trains/utilities/dicts.py new file mode 100644 index 00000000..5c5649fd --- /dev/null +++ b/trains/utilities/dicts.py @@ -0,0 +1,98 @@ +""" Utilities """ + +_epsilon = 0.00001 + + +class Logs: + _logs_instances = [] + + def __init__(self, data={}): + self._data = data or {} + self._logs_instances.append(self) + + def reset(self): + self._data = {} + + @property + def data(self): + return self._data + + @classmethod + def get_instances(cls): + return cls._logs_instances + + +class BlobsDict(dict): + """ + Overloading getitem so that the 'data' copy is only done when the dictionary item is accessed. + """ + def __init__(self, *args, **kwargs): + super(BlobsDict, self).__init__(*args, **kwargs) + + def __getitem__(self, k): + val = super(BlobsDict, self).__getitem__(k) + if isinstance(val, dict): + return BlobsDict(val) + # We need to ask isinstance without actually importing blob here + # so we accept that in order to appreciate beauty in life we must have a dash of ugliness. 
+ # and instead of - + # elif isinstance(val, Blob): + # we ask: + elif hasattr(val, '__class__') and val.__class__.__name__ == 'Blob': + return val.data + else: + return val + + +class NestedBlobsDict(BlobsDict): + """A dictionary that supports nested access using dot-separated key paths + (e.g. d['a.b.c']).""" + def __init__(self, *args, **kwargs): + super(NestedBlobsDict, self).__init__(*args, **kwargs) + + def __getitem__(self, keys_str=''): + + if keys_str == '': + return super(NestedBlobsDict, self).__getitem__(self) + + keylist = keys_str.split('.') + + cur = super(NestedBlobsDict, self).__getitem__(keylist[0]) + if len(keylist) == 1: + return cur + else: + return NestedBlobsDict(cur)['.'.join(keylist[1:])] + + def __contains__(self, keys_str): + keylist = self.keys() + return keys_str in keylist + + def as_dict(self): + return dict(self) + + def get(self, keys_str, default=None): + try: + return self[keys_str] + except Exception: + return default + + def _keys(self, cur_dict, path): + deep_keys = [] + cur_keys = dict.keys(cur_dict) + + for key in cur_keys: + if isinstance(cur_dict[key], dict): + if len(path) > 0: + deep_keys.extend(self._keys(cur_dict[key], path + '.' + key)) + else: + deep_keys.extend(self._keys(cur_dict[key], key)) + else: + if len(path) > 0: + deep_keys.append(path + '.' + key) + else: + deep_keys.append(key) + + return deep_keys + + def keys(self): + return self._keys(self, '') diff --git a/trains/utilities/enum.py b/trains/utilities/enum.py new file mode 100644 index 00000000..05b6649e --- /dev/null +++ b/trains/utilities/enum.py @@ -0,0 +1,24 @@ +""" Enum utilities """ + + +class EnumOptions(object): + """ Base class for enum-like classes using class-attributes with string values to represent enum key/value pairs """ + __cache = None + + @classmethod + def values(cls): + """ Extract list of enum-like options based on the derived classes' attributes. + Any class attribute whose key doesn't start with an underscore and whose value is not a class method + or callable is considered an option. + Returns a list of attribute values representing the options. 
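+ Example (illustrative): a subclass defining CSV = 'csv' and JSON = 'json' would return ['csv', 'json']
+ (the order follows the class __dict__, so it should not be relied upon).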
+ """ + if cls.__cache is None: + cls.__cache = [v for k, v in vars(cls).items() if + not k.startswith('_') and not callable(v) and not isinstance(v, classmethod)] + return cls.__cache + + +class Options(object): + @classmethod + def _all(cls): + return {k: v for k, v in vars(cls) if not k.startswith('_')} diff --git a/trains/utilities/frameworks.py b/trains/utilities/frameworks.py new file mode 100644 index 00000000..00b7951f --- /dev/null +++ b/trains/utilities/frameworks.py @@ -0,0 +1,1611 @@ +import base64 +import sys +import threading +import weakref +from collections import defaultdict +from logging import ERROR, WARNING, getLogger +from pathlib import Path + +import cv2 +import numpy as np +import six + +from ..config import running_remotely +from ..model import InputModel, OutputModel, Framework + +try: + from google.protobuf.json_format import MessageToDict +except ImportError: + MessageToDict = None + +if six.PY2: + # python2.x + import __builtin__ as builtins +else: + # python3.x + import builtins + + +TrainsFrameworkAdapter = 'TrainsFrameworkAdapter' +_recursion_guard = {} + + +class _Empty(object): + def __init__(self): + self.trains_in_model = None + + +class PostImportHookPatching(object): + _patched = False + _post_import_hooks = defaultdict(list) + + @staticmethod + def _init_hook(): + if PostImportHookPatching._patched: + return + PostImportHookPatching._patched = True + + if six.PY2: + # python2.x + builtins.__org_import__ = builtins.__import__ + builtins.__import__ = PostImportHookPatching._patched_import2 + else: + # python3.x + builtins.__org_import__ = builtins.__import__ + builtins.__import__ = PostImportHookPatching._patched_import3 + + @staticmethod + def _patched_import2(name, globals={}, locals={}, fromlist=[], level=-1): + already_imported = name in sys.modules + mod = builtins.__org_import__( + name, + globals=globals, + locals=locals, + fromlist=fromlist, + level=level) + + if not already_imported and name in PostImportHookPatching._post_import_hooks: + for hook in PostImportHookPatching._post_import_hooks[name]: + hook() + return mod + + @staticmethod + def _patched_import3(name, globals=None, locals=None, fromlist=(), level=0): + already_imported = name in sys.modules + mod = builtins.__org_import__( + name, + globals=globals, + locals=locals, + fromlist=fromlist, + level=level) + + if not already_imported and name in PostImportHookPatching._post_import_hooks: + for hook in PostImportHookPatching._post_import_hooks[name]: + hook() + return mod + + @staticmethod + def add_on_import(name, func): + PostImportHookPatching._init_hook() + if not name in PostImportHookPatching._post_import_hooks or \ + func not in PostImportHookPatching._post_import_hooks[name]: + PostImportHookPatching._post_import_hooks[name].append(func) + + @staticmethod + def remove_on_import(name, func): + if name in PostImportHookPatching._post_import_hooks and func in PostImportHookPatching._post_import_hooks[name]: + PostImportHookPatching._post_import_hooks[name].remove(func) + + +def _patched_call(original_fn, patched_fn): + def _inner_patch(*args, **kwargs): + ident = threading.get_ident() + if ident in _recursion_guard: + return original_fn(*args, **kwargs) + _recursion_guard[ident] = 1 + try: + ret = patched_fn(original_fn, *args, **kwargs) + except Exception as ex: + raise ex + finally: + try: + _recursion_guard.pop(ident) + except KeyError: + pass + return ret + return _inner_patch + + +class WeightsFileHandler(object): + _model_out_store_lookup = {} + _model_in_store_lookup = {} + 
_model_store_lookup_lock = threading.Lock() + + @staticmethod + def restore_weights_file(model, filepath, framework, task): + if task is None: + return filepath + + if not filepath: + getLogger(TrainsFrameworkAdapter).warning("Could retrieve model location, model not restored") + return filepath + + try: + WeightsFileHandler._model_store_lookup_lock.acquire() + + # check if object already has InputModel + trains_in_model, ref_model = WeightsFileHandler._model_in_store_lookup.get(id(model), (None, None)) + if ref_model is not None and model != ref_model(): + # old id pop it - it was probably reused because the object is dead + WeightsFileHandler._model_in_store_lookup.pop(id(model)) + trains_in_model, ref_model = None, None + + # check if object already has InputModel + model_name_id = getattr(model, 'name', '') + try: + config_text = None + config_dict = trains_in_model.config_dict if trains_in_model else None + except Exception: + config_dict = None + try: + config_text = trains_in_model.config_text if trains_in_model else None + except Exception: + config_text = None + trains_in_model = InputModel.import_model( + weights_url=filepath, + config_dict=config_dict, + config_text=config_text, + name=task.name + ' ' + model_name_id, + label_enumeration=task.get_labels_enumeration(), + framework=framework, + create_as_published=False, + ) + try: + ref_model = weakref.ref(model) + except Exception: + ref_model = None + WeightsFileHandler._model_in_store_lookup[id(model)] = (trains_in_model, ref_model) + # todo: support multiple models for the same task + task.connect(trains_in_model) + # if we are running remotely we should deserialize the object + # because someone might have changed the config_dict + if running_remotely(): + # reload the model + model_config = trains_in_model.config_dict + # verify that this is the same model so we are not deserializing a diff model + if (config_dict and config_dict.get('config') and model_config and model_config.get('config') and + config_dict.get('config').get('name') == model_config.get('config').get('name')) or \ + (not config_dict and not model_config): + filepath = trains_in_model.get_weights() + # update filepath to point to downloaded weights file + # actual model weights loading will be done outside the try/exception block + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + finally: + WeightsFileHandler._model_store_lookup_lock.release() + + return filepath + + @staticmethod + def create_output_model(model, saved_path, framework, task, singlefile=False, model_name=None): + if task is None: + return saved_path + + try: + WeightsFileHandler._model_store_lookup_lock.acquire() + + # check if object already has InputModel + trains_out_model, ref_model = WeightsFileHandler._model_out_store_lookup.get(id(model), (None, None)) + if ref_model is not None and model != ref_model(): + # old id pop it - it was probably reused because the object is dead + WeightsFileHandler._model_out_store_lookup.pop(id(model)) + trains_out_model, ref_model = None, None + + # check if object already has InputModel + if trains_out_model is None: + trains_out_model = OutputModel( + task=task, + # config_dict=config, + name=(task.name + ' - ' + model_name) if model_name else None, + label_enumeration=task.get_labels_enumeration(), + framework=framework,) + try: + ref_model = weakref.ref(model) + except Exception: + ref_model = None + WeightsFileHandler._model_out_store_lookup[id(model)] = (trains_out_model, ref_model) + + if not saved_path: + 
getLogger(TrainsFrameworkAdapter).warning("Could not retrieve model location, stored as unknown") + return saved_path + + # check if we have output storage, and generate list of files to upload + if trains_out_model.upload_storage_uri: + if Path(saved_path).is_dir(): + files = [str(f) for f in Path(saved_path).rglob('*') if f.is_file()] + elif singlefile: + files = [str(Path(saved_path).absolute())] + else: + files = [str(f) for f in Path(saved_path).parent.glob(str(Path(saved_path).name)+'.*')] + else: + files = None + + # upload files if we found them, or just register the original path + if files: + if len(files) > 1: + try: + target_filename = Path(saved_path).stem + except Exception: + target_filename = None + trains_out_model.update_weights_package(weights_filenames=files, auto_delete_file=False, + target_filename=target_filename) + else: + trains_out_model.update_weights(weights_filename=files[0], auto_delete_file=False) + else: + trains_out_model.update_weights(weights_filename=None, register_uri=saved_path) + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + finally: + WeightsFileHandler._model_store_lookup_lock.release() + + return saved_path + + +class EventTrainsWriter(object): + """ + TF SummaryWriter implementation that converts TensorBoard summaries into + Trains events and reports them (metrics) for a Trains task (logger). + """ + _add_lock = threading.Lock() + _series_name_lookup = {} + + @property + def variants(self): + return self._variants + + def prepare_report(self): + return self.variants.copy() + + @staticmethod + def tag_splitter(tag, num_split_parts, split_char='/', join_char='_', default_title='variant'): + """ + Split a tf.summary tag line to variant and metric. + Variant is the first part of the split tag, metric is the second. 
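+ For example, the tag 'train/loss/total' with num_split_parts=1 and the default separators yields
+ ('train_loss', 'total').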
+ :param str tag: + :param int num_split_parts: + :param str split_char: a character to split the tag on + :param str join_char: a character to join the the splits + :param str default_title: variant to use in case no variant can be inferred automatically + :return: (str, str) variant and metric + """ + splitted_tag = tag.split(split_char) + series = join_char.join(splitted_tag[-num_split_parts:]) + title = join_char.join(splitted_tag[:-num_split_parts]) or default_title + return title, series + + def __init__(self, logger, report_freq=100, image_report_freq=None, histogram_update_freq_multiplier=10, + histogram_granularity=50, max_keep_images=None): + """ + Create a compatible Trains backend to the TensorFlow SummaryToEventTransformer + Everything will be serialized directly to the Trains backend, instead of to the standard TF FileWriter + + :param logger: The task.logger to use for sending the metrics (def: task.get_logger()) + :param report_freq: How often to update the statistics values + :param image_report_freq: How often to upload images (step % image_update_freq == 0) + :param histogram_update_freq_multiplier: How often to upload histogram + (step//update_freq) % histogram_update_freq_multiplier == 0 + :param histogram_granularity: How many histograms (lines) to display in the 3d histogram plot + :param max_keep_images: Maximum number of images to save before starting to reuse files (per title/metric pair) + """ + # We are the events_writer, so that's what we'll pass + self.max_keep_images = max_keep_images + self.report_freq = report_freq + self.image_report_freq = image_report_freq if image_report_freq else report_freq + self.histogram_granularity = histogram_granularity + self.histogram_update_freq_multiplier = histogram_update_freq_multiplier + self._logger = logger + self._visualization_mode = 'BGR' + self._variants = defaultdict(lambda: ()) + self._scalar_report_cache = {} + self._hist_report_cache = {} + self._hist_x_granularity = 50 + self._max_step = 0 + + def _decode_image(self, img_str, width, height, color_channels): + try: + image_string = np.asarray(bytearray(base64.b64decode(img_str)), dtype=np.uint8) + image = cv2.imdecode(image_string, cv2.IMREAD_COLOR) + val = image.reshape(height, width, -1).astype(np.uint8) + if val.ndim == 3 and val.shape[2] == 3: + if self._visualization_mode == 'BGR': + val = val[:, :, [2, 1, 0]] + else: + val = val + elif (val.ndim == 2) or (val.ndim == 3 and val.shape[2] == 1): + val = np.tile(np.atleast_3d(val), (1, 1, 3)) + elif val.ndim == 3 and val.shape[2] == 4: + if self._visualization_mode == 'BGR': + val = val[:, :, [2, 1, 0]] + else: + val = val[:, :, [0, 1, 2]] + except Exception: + self._logger.warning('Failed decoding debug image [%d, %d, %d]' % (width, height, color_channels)) + val = None + return val + + def _add_image_numpy(self, tag, step, img_data_np, max_keep_images=None): + # only report images every specific interval + if step % self.image_report_freq != 0: + return None + + if img_data_np is None: + return + + title, series = self.tag_splitter(tag, num_split_parts=3, default_title='Images') + if img_data_np.dtype != np.uint8: + # assume scale 0-1 + img_data_np = (img_data_np*255).astype(np.uint8) + + # if 3d, pack into one big image + if img_data_np.ndim == 4: + dims = img_data_np.shape + stack_dim = int(np.sqrt(dims[0])) + res = img_data_np.reshape(stack_dim, stack_dim, *dims[1:]).transpose((0, 2, 1, 3, 4)) + tile_size = res.shape[0] * res.shape[1] + img_data_np = res.reshape(tile_size, tile_size, -1) + + 
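+ # e.g. a (16, 32, 32, 3) batch becomes a 4x4 grid: reshape to (4, 4, 32, 32, 3), swap axes to (4, 32, 4, 32, 3),
+ # then collapse into a single (128, 128, 3) tiled image (this assumes the batch size is a perfect square)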
self._logger.report_image_and_upload( + title=title, + series=series, + iteration=step, + matrix=img_data_np, + max_image_history=self.max_keep_images if max_keep_images is None else max_keep_images, + ) + + def _add_image(self, tag, step, img_data): + # only report images every specific interval + if step % self.image_report_freq != 0: + return None + + width = img_data['width'] + height = img_data['height'] + colorspace = img_data['colorspace'] + img_str = img_data['encodedImageString'] + matrix = self._decode_image(img_str, width=width, height=height, color_channels=colorspace) + if matrix is None: + return + + return self._add_image_numpy(tag=tag, step=step, img_data_np=matrix) + + def _add_scalar(self, tag, step, scalar_data): + title, series = self.tag_splitter(tag, num_split_parts=1, default_title='Scalars') + + # update scalar cache + num, value = self._scalar_report_cache.get((title, series), (0, 0)) + self._scalar_report_cache[(title, series)] = (num + 1, value + scalar_data) + + # only report images every specific interval + if step % self.report_freq != 0: + return None + + # calculate mean and zero cache + num, value = self._scalar_report_cache.get((title, series), (0, 0)) + scalar_data = value / num + self._scalar_report_cache[(title, series)] = (0, 0) + + self._logger.report_scalar( + title=title, + series=series, + iteration=step, + value=scalar_data, + ) + + def _add_histogram(self, tag, step, histo_data): + def _sample_histograms(_hist_iters, _histogram_granularity): + # resample history based on distribution of samples across time (steps) + ratio = ((_hist_iters[-1] - _hist_iters[_histogram_granularity]) / + (_hist_iters[_histogram_granularity - 1] - _hist_iters[0])) if \ + _hist_iters.size > _histogram_granularity else 0. + cur_idx_below = np.arange(0, min(_hist_iters.size, _histogram_granularity - 1)) + np.random.shuffle(cur_idx_below) + cur_idx_below = cur_idx_below[:int(_histogram_granularity * (1.0 - ratio / (1 + ratio)) + 0.5)] + if ratio > 0.0: + cur_idx_above = np.arange(_histogram_granularity - 1, _hist_iters.size) + np.random.shuffle(cur_idx_above) + cur_idx_above = cur_idx_above[:int(_histogram_granularity * ratio / (1 + ratio))] + else: + cur_idx_above = np.array([]) + _cur_idx = np.unique(np.sort(np.concatenate((cur_idx_below, cur_idx_above)).astype(np.int))) + return _cur_idx + + # only collect histogram every specific interval + if step % self.report_freq != 0 or step < self.report_freq - 1: + return None + + # generate forward matrix of the histograms + # Y-axis (rows) is iteration (from 0 to current Step) + # X-axis averaged bins (conformed sample 'bucketLimit') + # Z-axis actual value (interpolated 'bucket') + title, series = self.tag_splitter(tag, num_split_parts=1, default_title='Histograms') + + # get histograms from cache + hist_list, hist_iters, minmax = self._hist_report_cache.get((title, series), ([], np.array([]), None)) + + # resample data so we are always constrained in number of histogram we keep + if hist_iters.size >= self.histogram_granularity**2: + idx = _sample_histograms(hist_iters, self.histogram_granularity) + hist_iters = hist_iters[idx] + hist_list = [hist_list[i] for i in idx] + + # check if current sample is not already here (actually happens some times) + if step in hist_iters: + return None + + # add current sample, if not already here + hist_iters = np.append(hist_iters, step) + hist = np.array(list(zip(histo_data['bucketLimit'], histo_data['bucket'])), dtype=np.float32) + hist = hist[~np.isinf(hist[:, 0]), :] + 
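+ # hist is now an (N, 2) array of (bucket upper edge, count) pairs with the +/-inf edge buckets dropped,
+ # so the cached histograms can later be re-binned onto a common x-axis via np.interp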
hist_list.append(hist) + # keep track of min/max values of histograms (for later re-binning) + if minmax is None: + minmax = hist[:, 0].min(), hist[:, 0].max() + else: + minmax = min(minmax[0], hist[:, 0].min()), max(minmax[1], hist[:, 0].max()) + + # update the cache + self._hist_report_cache[(title, series)] = hist_list, hist_iters, minmax + + # only report histogram every specific interval, but do report the first few, so you know there are histograms + if hist_iters.size < 1 or (hist_iters.size >= self.histogram_update_freq_multiplier and + hist_iters.size % self.histogram_update_freq_multiplier != 0): + return None + + # resample histograms on a unified bin axis + _minmax = minmax[0] - 1, minmax[1] + 1 + prev_xedge = np.arange(start=_minmax[0], + step=(_minmax[1]-_minmax[0])/(self._hist_x_granularity-2), stop=_minmax[1]) + # uniformly select histograms and the last one + cur_idx = _sample_histograms(hist_iters, self.histogram_granularity) + report_hist = np.zeros(shape=(len(cur_idx), prev_xedge.size), dtype=np.float32) + for i, n in enumerate(cur_idx): + h = hist_list[n] + report_hist[i, :] = np.interp(prev_xedge, h[:, 0], h[:, 1], right=0, left=0) + yedges = hist_iters[cur_idx] + xedges = prev_xedge + + # if only a single line make, add another zero line, for the scatter plot to draw + if report_hist.shape[0] < 2: + report_hist = np.vstack((np.zeros_like(report_hist), report_hist)) + + # create 3d line (scatter) of histograms + skipx = max(1, int(xedges.size / 10)) + skipy = max(1, int(yedges.size / 10)) + xlabels = ['%.2f' % v if i % skipx == 0 else '' for i, v in enumerate(xedges[:-1])] + ylabels = [str(int(v)) if i % skipy == 0 else '' for i, v in enumerate(yedges)] + self._logger.report_surface( + title=title, + series=series, + iteration=0, + xtitle=' ', + ytitle='iteration', + xlabels=xlabels, + ylabels=ylabels, + matrix=report_hist, + camera=(-0.1, +1.3, 1.4)) + + def _add_plot(self, tag, step, values, vdict): + try: + plot_values = np.frombuffer(base64.b64decode(values['tensorContent'].encode('utf-8')), + dtype=np.float32) + plot_values = plot_values.reshape((int(values['tensorShape']['dim'][0]['size']), + int(values['tensorShape']['dim'][1]['size']))) + if 'metadata' in vdict: + if tag not in self._series_name_lookup: + self._series_name_lookup[tag] = [(tag, vdict['metadata']['displayName'], + vdict['metadata']['pluginData']['pluginName'])] + else: + # this should not happen, maybe it's another run, let increase the value + self._series_name_lookup[tag] += [(tag+'_%d' % len(self._series_name_lookup[tag])+1, + vdict['metadata']['displayName'], + vdict['metadata']['pluginData']['pluginName'])] + + tag, series, plugin_name = self._series_name_lookup.get(tag, [(tag, tag, '')])[-1] + + if 'pr_curve' in plugin_name: + # our thresholds are evenly distributed, in that + # width = 1.0 / (num_thresholds - 1) + # thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0] + num_thresholds = plot_values.shape[1] + width = 1.0 / (num_thresholds - 1) + thresholds = np.arange(0.0, 1.0, width, dtype=plot_values.dtype) + data_points = ['TP ', 'FP ', 'TN ', 'FN ', 'Precision ', ' Recall'] + series = [{'name': series, 'data': np.vstack((thresholds, plot_values[-2])).T, + 'labels': [''.join(data_points) + '
' + + ' '.join(['%-3.2f' % v for v in plot_values[:, j]]) for j in + range(plot_values.shape[1])]}] + reverse_xaxis = True + else: + reverse_xaxis = False + series = [{'name': series, 'data': plot_values}] + self._logger.report_line_plot(title=tag, series=series, xaxis='', yaxis='', + iteration=step, reverse_xaxis=reverse_xaxis) + except Exception: + pass + + def add_event(self, event, step=None, walltime=None, **kwargs): + supported_metrics = { + 'simpleValue', 'image', 'histo', 'tensor' + } + + def get_data(value_dict, metric_search_order): + data = None + metric_type = 'Unsupported' + for variant in metric_search_order: + data = value_dict.get(variant) + if data is not None: + metric_type = variant + break + return metric_type, data + + # Support multiple threads accessing this instance (i.e. let TF/Keras do what they need) + with self._add_lock: + # TODO: add report frequency threshold (i.e. if we are sending too much data, increase the report_freq) + # we should measure reports per second and throttle back the reporting details accordingly + msg_dict = MessageToDict(event) + summary = msg_dict.get('summary') + if summary is None: + msg_dict.pop('step', None) + msg_dict.pop('wallTime', None) + keys_list = [key for key in msg_dict.keys() if len(key) > 0] + keys_list = ', '.join(keys_list) + self._logger.debug('event summary not found, message type unsupported: %s' % keys_list) + return + value_dicts = summary.get('value') + walltime = walltime or msg_dict.get('step') + step = step or msg_dict.get('step') + if step is None: + # when we start a new epoch there is no step in the msg_dict, + # we have to extract it manually + if hasattr(event, 'step'): + step = int(event.step) + else: + step = 0 + self._logger.debug('Received event without step, assuming step = {}'.format(step), WARNING) + else: + step = int(step) + self._max_step = max(self._max_step, step) + if value_dicts is None: + self._logger.debug("Summary arrived without 'value'", ERROR) + return + + for vdict in value_dicts: + tag = vdict.pop('tag', None) + if tag is None: + # we should not get here + self._logger.debug('No tag for \'value\' existing keys %s' % ', '.join(vdict.keys())) + continue + metric, values = get_data(vdict, supported_metrics) + if metric == 'simpleValue': + self._add_scalar(tag=tag, step=step, scalar_data=values) + elif metric == 'histo': + self._add_histogram(tag=tag, step=step, histo_data=values) + elif metric == 'image': + self._add_image(tag=tag, step=step, img_data=values) + elif metric == 'tensor' and values.get('dtype') == 'DT_STRING': + # text, just print to console + text = base64.b64decode('\n'.join(values['stringVal'])).decode('utf-8') + self._logger.report_text(msg='SUMMARY LOG: {} {}'.format(tag, text), print_console=False) + elif metric == 'tensor' and values.get('dtype') == 'DT_FLOAT': + self._add_plot(tag, step, values, vdict) + else: + self._logger.debug('Event unsupported. tag = %s, vdict keys [%s]' % (tag, ', '.join(vdict.keys()))) + continue + + def get_logdir(self): + """ Returns a temporary directory name for compatibility with FileWriter. This directory is not actually used. + :return: '.' + """ + return '.' + + def flush(self): + """Flushes the event file to disk. + + Call this method to make sure that all pending events have been written to + disk. + """ + self._logger.flush() + + def close(self): + """Flushes the event file to disk and closes the file. + + Call this method when you do not need the summary writer anymore. 
+ """ + self._logger.flush() + + def reopen(self): + """Reopens the EventFileWriter. + + Can be called after `close()` to add more events in the same directory. + The events will go into a new events file. + + Does nothing if the EventFileWriter was not closed. + """ + pass + + +class ProxyEventsWriter(object): + def __init__(self, events): + self._events = events + + def _get_sentinel_event(self): + ret = None + for ev in self._events: + if hasattr(ev, '_get_sentinel_event'): + ret = ev._get_sentinel_event() + return ret + + def get_logdir(self): + ret = None + for ev in self._events: + if hasattr(ev, 'get_logdir'): + ret = ev.get_logdir() + return ret + + def reopen(self): + ret = None + for ev in self._events: + if hasattr(ev, 'reopen'): + ret = ev.reopen() + return ret + + def add_event(self, *args, **kwargs): + ret = None + for ev in self._events: + if hasattr(ev, 'add_event'): + ret = ev.add_event(*args, **kwargs) + return ret + + def flush(self): + ret = None + for ev in self._events: + if hasattr(ev, 'flush'): + ret = ev.flush() + return ret + + def close(self): + ret = None + for ev in self._events: + if hasattr(ev, 'close'): + ret = ev.close() + return ret + + +class PatchSummaryToEventTransformer(object): + __main_task = None + __original_getattribute = None + __original_getattributeX = None + _original_add_event = None + _original_add_eventT = None + _original_add_eventX = None + defaults_dict = dict( + report_freq=1, image_report_freq=1, histogram_update_freq_multiplier=5, + histogram_granularity=50) + + @staticmethod + def trains_object(self): + if isinstance(self.event_writer, ProxyEventsWriter): + trains_writer = [e for e in self.event_writer._events if isinstance(e, EventTrainsWriter)] + return trains_writer[0] if trains_writer else None + elif isinstance(self.event_writer, EventTrainsWriter): + return self.event_writer + if not self.__dict__.get('_trains_defaults'): + self.__dict__['_trains_defaults'] = {} + return self.__dict__['_trains_defaults'] + + @staticmethod + def update_current_task(task, **kwargs): + PatchSummaryToEventTransformer.defaults_dict.update(kwargs) + PatchSummaryToEventTransformer.__main_task = task + # make sure we patched the SummaryToEventTransformer + PatchSummaryToEventTransformer._patch_summary_to_event_transformer() + PostImportHookPatching.add_on_import('tensorflow', + PatchSummaryToEventTransformer._patch_summary_to_event_transformer) + PostImportHookPatching.add_on_import('torch', + PatchSummaryToEventTransformer._patch_summary_to_event_transformer) + PostImportHookPatching.add_on_import('tensorboardX', + PatchSummaryToEventTransformer._patch_summary_to_event_transformer) + + @staticmethod + def _patch_summary_to_event_transformer(): + if 'tensorflow' in sys.modules: + try: + from tensorflow.python.summary.writer.writer import SummaryToEventTransformer + # only patch once + if PatchSummaryToEventTransformer.__original_getattribute is None: + PatchSummaryToEventTransformer.__original_getattribute = SummaryToEventTransformer.__getattribute__ + SummaryToEventTransformer.__getattribute__ = PatchSummaryToEventTransformer._patched_getattribute + setattr(SummaryToEventTransformer, 'trains', + property(PatchSummaryToEventTransformer.trains_object)) + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + + if 'torch' in sys.modules: + try: + # only patch once + if PatchSummaryToEventTransformer._original_add_eventT is None: + from torch.utils.tensorboard.writer import FileWriter as FileWriterT + 
PatchSummaryToEventTransformer._original_add_eventT = FileWriterT.add_event + FileWriterT.add_event = PatchSummaryToEventTransformer._patched_add_eventT + setattr(FileWriterT, 'trains', None) + except ImportError: + # this is a new version of TensorflowX + pass + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + + if 'tensorboardX' in sys.modules: + try: + # only patch once + if PatchSummaryToEventTransformer.__original_getattributeX is None: + from tensorboardX.writer import SummaryToEventTransformer as SummaryToEventTransformerX + PatchSummaryToEventTransformer.__original_getattributeX = SummaryToEventTransformerX.__getattribute__ + SummaryToEventTransformerX.__getattribute__ = PatchSummaryToEventTransformer._patched_getattributeX + setattr(SummaryToEventTransformerX, 'trains', + property(PatchSummaryToEventTransformer.trains_object)) + except ImportError: + # this is a new version of TensorflowX + pass + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + + if PatchSummaryToEventTransformer.__original_getattributeX is None: + try: + # only patch once + if PatchSummaryToEventTransformer._original_add_eventX is None: + from tensorboardX.writer import FileWriter as FileWriterX + PatchSummaryToEventTransformer._original_add_eventX = FileWriterX.add_event + FileWriterX.add_event = PatchSummaryToEventTransformer._patched_add_eventX + setattr(FileWriterX, 'trains', None) + except ImportError: + # this is a new version of TensorflowX + pass + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + + @staticmethod + def _patched_add_eventT(self, *args, **kwargs): + if not hasattr(self, 'trains') or not PatchSummaryToEventTransformer.__main_task: + return PatchSummaryToEventTransformer._original_add_eventT(self, *args, **kwargs) + if not self.trains: + self.trains = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(), + **PatchSummaryToEventTransformer.defaults_dict) + try: + self.trains.add_event(*args, **kwargs) + except Exception: + pass + return PatchSummaryToEventTransformer._original_add_eventT(self, *args, **kwargs) + + @staticmethod + def _patched_add_eventX(self, *args, **kwargs): + if not hasattr(self, 'trains') or not PatchSummaryToEventTransformer.__main_task: + return PatchSummaryToEventTransformer._original_add_eventX(self, *args, **kwargs) + if not self.trains: + self.trains = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(), + **PatchSummaryToEventTransformer.defaults_dict) + try: + self.trains.add_event(*args, **kwargs) + except Exception: + pass + return PatchSummaryToEventTransformer._original_add_eventX(self, *args, **kwargs) + + @staticmethod + def _patched_getattribute(self, attr): + get_base = PatchSummaryToEventTransformer.__original_getattribute + return PatchSummaryToEventTransformer._patched_getattribute_(self, attr, get_base) + + @staticmethod + def _patched_getattributeX(self, attr): + get_base = PatchSummaryToEventTransformer.__original_getattributeX + return PatchSummaryToEventTransformer._patched_getattribute_(self, attr, get_base) + + @staticmethod + def _patched_getattribute_(self, attr, get_base): + # no main task, zero chance we have an Trains event logger + if PatchSummaryToEventTransformer.__main_task is None: + return get_base(self, attr) + + # check if we already have an Trains event logger + __dict__ = get_base(self, '__dict__') + if 'event_writer' not in __dict__ or \ + isinstance(__dict__['event_writer'], (ProxyEventsWriter, 
EventTrainsWriter)): + return get_base(self, attr) + + # patch the events writer field, and add a double Event Logger (Trains and original) + base_eventwriter = __dict__['event_writer'] + defaults_dict = __dict__.get('_trains_defaults') or PatchSummaryToEventTransformer.defaults_dict + trains_event = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(), **defaults_dict) + + # order is important, the return value of ProxyEventsWriter is the last object in the list + __dict__['event_writer'] = ProxyEventsWriter([trains_event, base_eventwriter]) + return get_base(self, attr) + + +class _ModelAdapter(object): + """ Model adapter which extends the save and save_weights methods of a Keras Model instance """ + _model = None # type: Any + _output_model = None # type: OutputModel + + def __init__(self, model, output_model): + super(_ModelAdapter, self).__init__() + super(_ModelAdapter, self).__setattr__('_model', model) + super(_ModelAdapter, self).__setattr__('_output_model', output_model) + super(_ModelAdapter, self).__setattr__('_logger', getLogger('TrainsModelAdapter')) + + def __getattr__(self, attr): + return getattr(self._model, attr) + + def __setattr__(self, key, value): + return setattr(self._model, key, value) + + def save(self, filepath, overwrite=True, include_optimizer=True): + self._model.save(filepath=filepath, overwrite=overwrite, include_optimizer=include_optimizer) + # TODO: auto generate new objects of filename changes + try: + self._output_model.update_weights(weights_filename=filepath, auto_delete_file=True) + except Exception as ex: + self._logger.error(str(ex)) + + def save_weights(self, filepath, overwrite=True): + self._model.save_weights(filepath=filepath, overwrite=overwrite) + # TODO: auto generate new objects of filename changes + try: + self._output_model.update_weights(weights_filename=filepath, auto_delete_file=True) + except Exception as ex: + self._logger.error(str(ex)) + + +class PatchModelCheckPointCallback(object): + __main_task = None + __original_getattribute = None + defaults_dict = dict( + config_text=None, + config_dict=None, + label_enumeration=None, + name=None, + comment=None) + + @staticmethod + def trains_object(self): + if isinstance(self.model, _ModelAdapter): + return self.model._output_model + if not self.__dict__.get('_trains_defaults'): + self.__dict__['_trains_defaults'] = {} + return self.__dict__['_trains_defaults'] + + @staticmethod + def update_current_task(task, **kwargs): + PatchModelCheckPointCallback.defaults_dict.update(kwargs) + PatchModelCheckPointCallback.__main_task = task + # make sure we patched the SummaryToEventTransformer + PatchModelCheckPointCallback._patch_model_checkpoint() + PostImportHookPatching.add_on_import('keras', PatchModelCheckPointCallback._patch_model_checkpoint) + PostImportHookPatching.add_on_import('tensorflow', PatchModelCheckPointCallback._patch_model_checkpoint) + + @staticmethod + def _patch_model_checkpoint(): + is_keras = 'keras' in sys.modules + is_tf_keras = 'tensorflow' in sys.modules + callbacks = None + if is_keras: + try: + import keras.callbacks as callbacks + except ImportError: + is_keras = False + if not is_keras and is_tf_keras: + try: + # hack: make sure tensorflow.__init__ is called + import tensorflow + import tensorflow.python.keras.callbacks as callbacks + except ImportError: + is_tf_keras = False + callbacks = None + # we have nothing, quit + if not is_keras and not is_tf_keras: + return + + try: + # only patch once + if 
PatchModelCheckPointCallback.__original_getattribute is None and callbacks is not None: + PatchModelCheckPointCallback.__original_getattribute = callbacks.ModelCheckpoint.__getattribute__ + callbacks.ModelCheckpoint.__getattribute__ = PatchModelCheckPointCallback._patched_getattribute + setattr(callbacks.ModelCheckpoint, 'trains', + property(PatchModelCheckPointCallback.trains_object)) + + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + + @staticmethod + def _patched_getattribute(self, attr): + get_base = PatchModelCheckPointCallback.__original_getattribute + + # no main task, zero chance we have an Trains event logger + if PatchModelCheckPointCallback.__main_task is None: + return get_base(self, attr) + + # check if we already have an Trains event logger + __dict__ = get_base(self, '__dict__') + if 'model' not in __dict__ or \ + isinstance(__dict__['model'], _ModelAdapter): + return get_base(self, attr) + + # patch the events writer field, and add a double Event Logger (Trains and original) + base_model = __dict__['model'] + defaults_dict = __dict__.get('_trains_defaults') or PatchModelCheckPointCallback.defaults_dict + output_model = OutputModel( + PatchModelCheckPointCallback.__main_task, + config_text=defaults_dict.get('config_text'), + config_dict=defaults_dict.get('config_dict'), + name=defaults_dict.get('name'), + comment=defaults_dict.get('comment'), + label_enumeration=defaults_dict.get('label_enumeration') or + PatchModelCheckPointCallback.__main_task.get_labels_enumeration(), + framework=Framework.keras, + ) + output_model.set_upload_destination( + PatchModelCheckPointCallback.__main_task.get_output_destination(raise_on_error=False)) + trains_model = _ModelAdapter(base_model, output_model) + + # order is important, the return value of ProxyEventsWriter is the last object in the list + __dict__['model'] = trains_model + return get_base(self, attr) + + +class PatchTensorFlowEager(object): + __main_task = None + __original_fn_scalar = None + __original_fn_hist = None + __original_fn_image = None + __trains_event_writer = None + defaults_dict = dict( + report_freq=1, image_report_freq=1, histogram_update_freq_multiplier=5, + histogram_granularity=50) + + @staticmethod + def update_current_task(task, **kwargs): + PatchTensorFlowEager.defaults_dict.update(kwargs) + PatchTensorFlowEager.__main_task = task + # make sure we patched the SummaryToEventTransformer + PatchTensorFlowEager._patch_model_checkpoint() + PostImportHookPatching.add_on_import('tensorflow', PatchTensorFlowEager._patch_model_checkpoint) + + @staticmethod + def _patch_model_checkpoint(): + if PatchTensorFlowEager.__original_fn_scalar is not None: + return + if 'tensorflow' in sys.modules: + try: + # hack: make sure tensorflow.__init__ is called + import tensorflow + from tensorflow.python.ops import gen_summary_ops + PatchTensorFlowEager.__original_fn_scalar = gen_summary_ops.write_scalar_summary + gen_summary_ops.write_scalar_summary = PatchTensorFlowEager._write_scalar_summary + PatchTensorFlowEager.__original_fn_image = gen_summary_ops.write_image_summary + gen_summary_ops.write_image_summary = PatchTensorFlowEager._write_image_summary + PatchTensorFlowEager.__original_fn_hist = gen_summary_ops.write_histogram_summary + gen_summary_ops.write_histogram_summary = PatchTensorFlowEager._write_hist_summary + except ImportError: + pass + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + + @staticmethod + def _get_event_writer(): + if not 
PatchTensorFlowEager.__main_task: + return None + if PatchTensorFlowEager.__trains_event_writer is None: + PatchTensorFlowEager.__trains_event_writer = EventTrainsWriter( + logger=PatchTensorFlowEager.__main_task.get_logger(), **PatchTensorFlowEager.defaults_dict) + return PatchTensorFlowEager.__trains_event_writer + + @staticmethod + def trains_object(self): + return PatchTensorFlowEager.__trains_event_writer + + @staticmethod + def _write_scalar_summary(writer, step, tag, value, name=None, **kwargs): + event_writer = PatchTensorFlowEager._get_event_writer() + if event_writer: + try: + event_writer._add_scalar(tag=str(tag), step=int(step.numpy()), scalar_data=value.numpy()) + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + return PatchTensorFlowEager.__original_fn_scalar(writer, step, tag, value, name, **kwargs) + + @staticmethod + def _write_hist_summary(writer, step, tag, values, name, **kwargs): + event_writer = PatchTensorFlowEager._get_event_writer() + if event_writer: + try: + event_writer._add_histogram(tag=str(tag), step=int(step.numpy()), histo_data=values.numpy()) + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + return PatchTensorFlowEager.__original_fn_hist(writer, step, tag, values, name, **kwargs) + + @staticmethod + def _write_image_summary(writer, step, tag, tensor, bad_color, max_images, name, **kwargs): + event_writer = PatchTensorFlowEager._get_event_writer() + if event_writer: + try: + event_writer._add_image_numpy(tag=str(tag), step=int(step.numpy()), img_data_np=tensor.numpy(), + max_keep_images=max_images) + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + return PatchTensorFlowEager.__original_fn_image(writer, step, tag, tensor, bad_color, max_images, name, + **kwargs) + + +class PatchKerasModelIO(object): + __main_task = None + __patched = None + + @staticmethod + def update_current_task(task, **kwargs): + PatchKerasModelIO.__main_task = task + PatchKerasModelIO._patch_model_checkpoint() + PostImportHookPatching.add_on_import('tensorflow', PatchKerasModelIO._patch_model_checkpoint) + PostImportHookPatching.add_on_import('keras', PatchKerasModelIO._patch_model_checkpoint) + + @staticmethod + def _patch_model_checkpoint(): + if 'keras' in sys.modules: + try: + from keras.engine.network import Network + except ImportError: + Network = None + try: + from keras.engine.sequential import Sequential + except ImportError: + Sequential = None + try: + from keras import models as keras_saving + except ImportError: + keras_saving = None + PatchKerasModelIO._patch_io_calls(Network, Sequential, keras_saving) + if 'tensorflow' in sys.modules: + try: + # hack: make sure tensorflow.__init__ is called + import tensorflow + from tensorflow.python.keras.engine.network import Network + except ImportError: + Network = None + try: + # hack: make sure tensorflow.__init__ is called + import tensorflow + from tensorflow.python.keras.engine.sequential import Sequential + except ImportError: + Sequential = None + try: + # hack: make sure tensorflow.__init__ is called + import tensorflow + from tensorflow.python.keras import models as keras_saving + except ImportError: + keras_saving = None + PatchKerasModelIO._patch_io_calls(Network, Sequential, keras_saving) + + @staticmethod + def _patch_io_calls(Network, Sequential, keras_saving): + try: + # only patch once + if not PatchKerasModelIO.__patched: + PatchKerasModelIO.__patched = True + if Sequential is not None: + Sequential._updated_config = 
_patched_call(Sequential._updated_config, + PatchKerasModelIO._updated_config) + Sequential.from_config = _patched_call(Sequential.from_config, PatchKerasModelIO._from_config) + + if Network is not None: + Network._updated_config = _patched_call(Network._updated_config, PatchKerasModelIO._updated_config) + Network.from_config = _patched_call(Network.from_config, PatchKerasModelIO._from_config) + Network.save = _patched_call(Network.save, PatchKerasModelIO._save) + Network.save_weights = _patched_call(Network.save_weights, PatchKerasModelIO._save_weights) + Network.load_weights = _patched_call(Network.load_weights, PatchKerasModelIO._load_weights) + + if keras_saving is not None: + keras_saving.save_model = _patched_call(keras_saving.save_model, PatchKerasModelIO._save_model) + keras_saving.load_model = _patched_call(keras_saving.load_model, PatchKerasModelIO._load_model) + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + + @staticmethod + def _updated_config(original_fn, self): + config = original_fn(self) + # check if we have main task + if PatchKerasModelIO.__main_task is None: + return config + + try: + # check if object already has InputModel + if not hasattr(self, 'trains_out_model'): + self.trains_out_model = None + + # check if object already has InputModel + model_name_id = config.get('name', getattr(self, 'name', 'unknown')) + if self.trains_out_model is not None: + self.trains_out_model.config_dict = config + else: + # todo: support multiple models for the same task + self.trains_out_model = OutputModel( + task=PatchKerasModelIO.__main_task, + config_dict=config, + name=PatchKerasModelIO.__main_task.name + ' ' + model_name_id, + label_enumeration=PatchKerasModelIO.__main_task.get_labels_enumeration(), + framework=Framework.keras, + ) + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + + return config + + @staticmethod + def _from_config(original_fn, *args, **kwargs): + try: + self = original_fn(*args, **kwargs) + except Exception as ex: + if not running_remotely(): + raise ex + self = _Empty() + + # check if we have main task + if PatchKerasModelIO.__main_task is None: + return self + + try: + # check if object already has InputModel + if not hasattr(self, 'trains_in_model'): + self.trains_in_model = None + + # get config + config_dict = kwargs['config'] if 'config' in kwargs else args[0] + # check if object already has InputModel + self.trains_in_model = InputModel.empty( + config_dict=config_dict, + label_enumeration=PatchKerasModelIO.__main_task.get_labels_enumeration(), + ) + # todo: support multiple models for the same task + PatchKerasModelIO.__main_task.connect(self.trains_in_model) + # if we are running remotely we should deserialize the object + # because someone might have changed the configuration + if running_remotely(): + # reload the model + model_config = self.trains_in_model.config_dict + # verify that this is the same model so we are not deserializing a diff model + if (config_dict and config_dict.get('config') and model_config and model_config.get('config') and + config_dict.get('config').get('name') == model_config.get('config').get('name')) or \ + (not config_dict and not model_config): + if 'config' in kwargs: + kwargs['config'] = model_config + else: + args = (model_config,) + args[1:] + model = original_fn(*args, **kwargs) + model.trains_in_model = self.trains_in_model + return model + + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + + return self + + @staticmethod + 
def _load_weights(original_fn, self, *args, **kwargs): + # check if we have main task + if PatchKerasModelIO.__main_task is None: + return original_fn(self, *args, **kwargs) + + # get filepath + filepath = kwargs['filepath'] if 'filepath' in kwargs else args[0] + if running_remotely(): + # register/load model weights + filepath = WeightsFileHandler.restore_weights_file(self, filepath, Framework.keras, + PatchKerasModelIO.__main_task) + if 'filepath' in kwargs: + kwargs['filepath'] = filepath + else: + args = (filepath,) + args[1:] + # load model + return original_fn(self, *args, **kwargs) + + # try to load the files, if something happened exception will be raised before we register the file + model = original_fn(self, *args, **kwargs) + # register/load model weights + WeightsFileHandler.restore_weights_file(self, filepath, Framework.keras, PatchKerasModelIO.__main_task) + return model + + @staticmethod + def _save(original_fn, self, *args, **kwargs): + if hasattr(self, 'trains_out_model'): + self.trains_out_model._processed = False + original_fn(self, *args, **kwargs) + # no need to specially call, because the original save uses "save_model" which we overload + if not hasattr(self, 'trains_out_model') or not self.trains_out_model._processed: + PatchKerasModelIO._update_outputmodel(self, *args, **kwargs) + + @staticmethod + def _save_weights(original_fn, self, *args, **kwargs): + original_fn(self, *args, **kwargs) + PatchKerasModelIO._update_outputmodel(self, *args, **kwargs) + + @staticmethod + def _update_outputmodel(self, *args, **kwargs): + # check if we have main task + if PatchKerasModelIO.__main_task is None: + return + + try: + # get filepath + filepath = kwargs['filepath'] if 'filepath' in kwargs else args[0] + + # this will already generate an output model + config = self._updated_config() + + # check if object already has InputModel + if not hasattr(self, 'trains_out_model'): + self.trains_out_model = None + + # check if object already has InputModel + if self.trains_out_model is not None: + self.trains_out_model.config_dict = config + else: + model_name_id = getattr(self, 'name', 'unknown') + # todo: support multiple models for the same task + self.trains_out_model = OutputModel( + task=PatchKerasModelIO.__main_task, + config_dict=config, + name=PatchKerasModelIO.__main_task.name + ' ' + model_name_id, + label_enumeration=PatchKerasModelIO.__main_task.get_labels_enumeration(), + framework=Framework.keras, + ) + # check if we have output storage + if self.trains_out_model.upload_storage_uri: + self.trains_out_model.update_weights(weights_filename=filepath, auto_delete_file=False) + else: + self.trains_out_model.update_weights(weights_filename=None, register_uri=filepath) + # if anyone asks, we were here + self.trains_out_model._processed = True + except Exception as ex: + getLogger(TrainsFrameworkAdapter).warning(str(ex)) + + @staticmethod + def _save_model(original_fn, model, filepath, *args, **kwargs): + original_fn(model, filepath, *args, **kwargs) + if PatchKerasModelIO.__main_task: + PatchKerasModelIO._update_outputmodel(model, filepath) + + @staticmethod + def _load_model(original_fn, filepath, *args, **kwargs): + if not PatchKerasModelIO.__main_task: + return original_fn(filepath, *args, **kwargs) + + empty = _Empty() + if running_remotely(): + # register/load model weights + filepath = WeightsFileHandler.restore_weights_file(empty, filepath, Framework.keras, + PatchKerasModelIO.__main_task) + model = original_fn(filepath, *args, **kwargs) + else: + model = 
original_fn(filepath, *args, **kwargs) + # register/load model weights + WeightsFileHandler.restore_weights_file(empty, filepath, Framework.keras, PatchKerasModelIO.__main_task) + # update the input model object + if empty.trains_in_model: + try: + model.trains_in_model = empty.trains_in_model + except Exception: + pass + + return model + + +class PatchTensorflowModelIO(object): + __main_task = None + __patched = None + + @staticmethod + def update_current_task(task, **kwargs): + PatchTensorflowModelIO.__main_task = task + PatchTensorflowModelIO._patch_model_checkpoint() + PostImportHookPatching.add_on_import('tensorflow', PatchTensorflowModelIO._patch_model_checkpoint) + + @staticmethod + def _patch_model_checkpoint(): + if PatchTensorflowModelIO.__patched: + return + + if 'tensorflow' not in sys.modules: + return + + PatchTensorflowModelIO.__patched = True + + try: + # hack: make sure tensorflow.__init__ is called + import tensorflow + from tensorflow.python.training.saver import Saver + try: + Saver.save = _patched_call(Saver.save, PatchTensorflowModelIO._save) + except Exception: + pass + try: + Saver.restore = _patched_call(Saver.restore, PatchTensorflowModelIO._restore) + except Exception: + pass + except ImportError: + pass + except Exception: + pass # print('Failed patching tensorflow') + + try: + # make sure we import the correct version of save + import tensorflow + from tensorflow.saved_model.experimental import save + # actual import + import tensorflow.saved_model.experimental as saved_model + except ImportError: + try: + # make sure we import the correct version of save + import tensorflow + from tensorflow.saved_model import save + # actual import + import tensorflow.saved_mode as saved_model + except ImportError: + saved_model = None + except Exception: + saved_model = None + pass # print('Failed patching tensorflow') + except Exception: + saved_model = None + pass # print('Failed patching tensorflow') + + if saved_model is not None: + saved_model.save = _patched_call(saved_model.save, PatchTensorflowModelIO._save_model) + + try: + # make sure we import the correct version of save + import tensorflow + # actual import + from tensorflow.saved_model import load + import tensorflow.saved_model as saved_model_load + saved_model_load.load = _patched_call(saved_model_load.load, PatchTensorflowModelIO._load) + except ImportError: + pass + except Exception: + pass # print('Failed patching tensorflow') + + try: + # make sure we import the correct version of save + import tensorflow + # actual import + from tensorflow.saved_model import loader as loader1 + loader1.load = _patched_call(loader1.load, PatchTensorflowModelIO._load) + except ImportError: + pass + except Exception: + pass # print('Failed patching tensorflow') + + try: + # make sure we import the correct version of save + import tensorflow + # actual import + from tensorflow.compat.v1.saved_model import loader as loader2 + loader2.load = _patched_call(loader2.load, PatchTensorflowModelIO._load) + except ImportError: + pass + except Exception: + pass # print('Failed patching tensorflow') + + try: + import tensorflow + from tensorflow.train import Checkpoint + try: + Checkpoint.save = _patched_call(Checkpoint.save, PatchTensorflowModelIO._ckpt_save) + except Exception: + pass + try: + Checkpoint.restore = _patched_call(Checkpoint.restore, PatchTensorflowModelIO._ckpt_restore) + except Exception: + pass + try: + Checkpoint.write = _patched_call(Checkpoint.write, PatchTensorflowModelIO._ckpt_write) + except Exception: + pass + 
except ImportError: + pass + except Exception: + pass # print('Failed patching tensorflow') + + @staticmethod + def _save(original_fn, self, sess, save_path, *args, **kwargs): + saved_path = original_fn(self, sess, save_path, *args, **kwargs) + if not saved_path: + return saved_path + # store output Model + return WeightsFileHandler.create_output_model(self, saved_path, Framework.tensorflow, + PatchTensorflowModelIO.__main_task) + + @staticmethod + def _save_model(original_fn, obj, export_dir, *args, **kwargs): + original_fn(obj, export_dir, *args, **kwargs) + # store output Model + WeightsFileHandler.create_output_model(obj, export_dir, Framework.tensorflow, + PatchTensorflowModelIO.__main_task) + + @staticmethod + def _restore(original_fn, self, sess, save_path, *args, **kwargs): + if PatchTensorflowModelIO.__main_task is None: + return original_fn(self, sess, save_path, *args, **kwargs) + + if running_remotely(): + # register/load model weights + save_path = WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow, + PatchTensorflowModelIO.__main_task) + # load model + return original_fn(self, sess, save_path, *args, **kwargs) + + # load model, if something is wrong, exception will be raised before we register the input model + model = original_fn(self, sess, save_path, *args, **kwargs) + # register/load model weights + WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow, + PatchTensorflowModelIO.__main_task) + return model + + @staticmethod + def _load(original_fn, sess, tags, export_dir, *args, **saver_kwargs): + if PatchTensorflowModelIO.__main_task is None: + return original_fn(sess, tags, export_dir, *args, **saver_kwargs) + + # register input model + empty = _Empty() + if running_remotely(): + export_dir = WeightsFileHandler.restore_weights_file(empty, export_dir, Framework.tensorflow, + PatchTensorflowModelIO.__main_task) + model = original_fn(sess, tags, export_dir, *args, **saver_kwargs) + else: + # try to load model before registering, it might fail + model = original_fn(sess, tags, export_dir, *args, **saver_kwargs) + WeightsFileHandler.restore_weights_file(empty, export_dir, Framework.tensorflow, + PatchTensorflowModelIO.__main_task) + + if empty.trains_in_model: + try: + model.trains_in_model = empty.trains_in_model + except Exception: + pass + return model + + @staticmethod + def _ckpt_save(original_fn, self, file_prefix, *args, **kwargs): + checkpoint_path = original_fn(self, file_prefix, *args, **kwargs) + if PatchTensorflowModelIO.__main_task is None: + return checkpoint_path + WeightsFileHandler.create_output_model(self, checkpoint_path, Framework.tensorflow, + PatchTensorflowModelIO.__main_task) + return checkpoint_path + + @staticmethod + def _ckpt_write(original_fn, self, file_prefix, *args, **kwargs): + checkpoint_path = original_fn(self, file_prefix, *args, **kwargs) + if PatchTensorflowModelIO.__main_task is None: + return checkpoint_path + WeightsFileHandler.create_output_model(self, checkpoint_path, Framework.tensorflow, + PatchTensorflowModelIO.__main_task) + return checkpoint_path + + @staticmethod + def _ckpt_restore(original_fn, self, save_path, *args, **kwargs): + if PatchTensorflowModelIO.__main_task is None: + return original_fn(self, save_path, *args, **kwargs) + + # register input model + empty = _Empty() + if running_remotely(): + save_path = WeightsFileHandler.restore_weights_file(empty, save_path, Framework.tensorflow, + PatchTensorflowModelIO.__main_task) + model = original_fn(self, save_path, *args, 
**kwargs) + else: + # try to load model before registering it, in case it fails. + model = original_fn(self, save_path, *args, **kwargs) + WeightsFileHandler.restore_weights_file(empty, save_path, Framework.tensorflow, + PatchTensorflowModelIO.__main_task) + + if empty.trains_in_model: + try: + model.trains_in_model = empty.trains_in_model + except Exception: + pass + return model + + +class PatchPyTorchModelIO(object): + __main_task = None + __patched = None + + @staticmethod + def update_current_task(task, **kwargs): + PatchPyTorchModelIO.__main_task = task + PatchPyTorchModelIO._patch_model_io() + PostImportHookPatching.add_on_import('torch', PatchPyTorchModelIO._patch_model_io) + + @staticmethod + def _patch_model_io(): + if PatchPyTorchModelIO.__patched: + return + + if 'torch' not in sys.modules: + return + + PatchPyTorchModelIO.__patched = True + + try: + # hack: make sure tensorflow.__init__ is called + import torch + torch.save = _patched_call(torch.save, PatchPyTorchModelIO._save) + torch.load = _patched_call(torch.load, PatchPyTorchModelIO._load) + except ImportError: + pass + except Exception: + pass # print('Failed patching pytorch') + + @staticmethod + def _save(original_fn, obj, f, *args, **kwargs): + ret = original_fn(obj, f, *args, **kwargs) + if not PatchPyTorchModelIO.__main_task: + return ret + + if isinstance(f, six.string_types): + filename = f + elif hasattr(f, 'name'): + filename = f.name + try: + f.flush() + except Exception: + pass + else: + filename = None + + # if the model a screptive name based on the file name + try: + model_name = Path(filename).stem + except Exception: + model_name = None + WeightsFileHandler.create_output_model(obj, filename, Framework.pytorch, PatchPyTorchModelIO.__main_task, + singlefile=True, model_name=model_name) + return ret + + @staticmethod + def _load(original_fn, f, *args, **kwargs): + if isinstance(f, six.string_types): + filename = f + elif hasattr(f, 'name'): + filename = f.name + else: + filename = None + + if not PatchPyTorchModelIO.__main_task: + return original_fn(f, *args, **kwargs) + + # register input model + empty = _Empty() + if running_remotely(): + filename = WeightsFileHandler.restore_weights_file(empty, filename, Framework.pytorch, + PatchPyTorchModelIO.__main_task) + model = original_fn(filename or f, *args, **kwargs) + else: + # try to load model before registering, in case we fail + model = original_fn(filename or f, *args, **kwargs) + WeightsFileHandler.restore_weights_file(empty, filename, Framework.pytorch, + PatchPyTorchModelIO.__main_task) + + if empty.trains_in_model: + try: + model.trains_in_model = empty.trains_in_model + except Exception: + pass + return model diff --git a/trains/utilities/matplotlib_bind.py b/trains/utilities/matplotlib_bind.py new file mode 100644 index 00000000..75a5677d --- /dev/null +++ b/trains/utilities/matplotlib_bind.py @@ -0,0 +1,188 @@ +import sys + +import cv2 +import numpy as np +from six import BytesIO + +from ..config import running_remotely + + +class PatchedMatplotlib: + _patched_original_plot = None + __patched_original_imshow = None + _global_plot_counter = -1 + _global_image_counter = -1 + _current_task = None + + class _PatchWarnings(object): + def __init__(self): + pass + + def warn(self, text, *args, **kwargs): + raise ValueError(text) + + def __getattr__(self, item): + def bypass(*args, **kwargs): + pass + return bypass + + @staticmethod + def patch_matplotlib(): + # only once + if PatchedMatplotlib._patched_original_plot is not None: + return True + # 
noinspection PyBroadException + try: + # we support matplotlib version 2.0.0 and above + import matplotlib + if int(matplotlib.__version__.split('.')[0]) < 2: + return False + + if running_remotely(): + # disable GUI backend - make headless + sys.modules['matplotlib'].rcParams['backend'] = 'agg' + import matplotlib.pyplot + sys.modules['matplotlib'].pyplot.switch_backend('agg') + import matplotlib.pyplot as plt + import plotly.tools as tls + from matplotlib import _pylab_helpers + PatchedMatplotlib._patched_original_plot = sys.modules['matplotlib'].pyplot.show + PatchedMatplotlib._patched_original_imshow = sys.modules['matplotlib'].pyplot.imshow + sys.modules['matplotlib'].pyplot.show = PatchedMatplotlib.patched_show + # sys.modules['matplotlib'].pyplot.imshow = PatchedMatplotlib.patched_imshow + # patch plotly so we know it failed us. + from plotly.matplotlylib import renderer + renderer.warnings = PatchedMatplotlib._PatchWarnings() + except Exception: + return False + + # patch IPython matplotlib inline mode + # noinspection PyBroadException + try: + if 'IPython' in sys.modules: + from IPython import get_ipython + ip = get_ipython() + if ip and matplotlib.is_interactive(): + ip.events.register('post_execute', PatchedMatplotlib.ipython_post_execute_hook) + except Exception: + pass + + return True + + @staticmethod + def update_current_task(task): + if PatchedMatplotlib.patch_matplotlib(): + PatchedMatplotlib._current_task = task + + @staticmethod + def patched_imshow(*args, **kw): + ret = PatchedMatplotlib._patched_original_imshow(*args, **kw) + PatchedMatplotlib._report_figure(force_save_as_image=True) + return ret + + @staticmethod + def patched_show(*args, **kw): + PatchedMatplotlib._report_figure() + ret = PatchedMatplotlib._patched_original_plot(*args, **kw) + if PatchedMatplotlib._current_task and running_remotely(): + # clear the current plot, because no one else will + # noinspection PyBroadException + try: + if sys.modules['matplotlib'].rcParams['backend'] == 'agg': + import matplotlib.pyplot as plt + plt.clf() + except Exception: + pass + return ret + + @staticmethod + def _report_figure(force_save_as_image=False, stored_figure=None, set_active=True): + if not PatchedMatplotlib._current_task: + return + + # noinspection PyBroadException + try: + import matplotlib.pyplot as plt + import plotly.tools as tls + from plotly import optional_imports + from matplotlib import _pylab_helpers + # store the figure object we just created (if it is not already there) + stored_figure = stored_figure or _pylab_helpers.Gcf.get_active() + if not stored_figure: + # nothing for us to do + return + # get current figure + mpl_fig = stored_figure.canvas.figure # plt.gcf() + # convert to plotly + image = None + plotly_fig = None + if not force_save_as_image: + # noinspection PyBroadException + try: + def our_mpl_to_plotly(fig): + matplotlylib = optional_imports.get_module('plotly.matplotlylib') + if matplotlylib: + renderer = matplotlylib.PlotlyRenderer() + matplotlylib.Exporter(renderer, close_mpl=False).run(fig) + return renderer.plotly_fig + + plotly_fig = our_mpl_to_plotly(mpl_fig) + except Exception: + pass + + # plotly could not serialize the plot, we should convert to image + if not plotly_fig: + plotly_fig = None + buffer_ = BytesIO() + plt.savefig(buffer_, format="png", bbox_inches='tight', pad_inches=0) + buffer_.seek(0) + image = cv2.imdecode(np.frombuffer(buffer_.getbuffer(), dtype=np.uint8), cv2.IMREAD_UNCHANGED) + + # check if we need to restore the active object + if set_active and not 
_pylab_helpers.Gcf.get_active(): + _pylab_helpers.Gcf.set_active(stored_figure) + + # get the main task + reporter = PatchedMatplotlib._current_task.reporter + if reporter is not None: + if mpl_fig.texts: + plot_title = mpl_fig.texts[0].get_text() + else: + gca = mpl_fig.gca() + plot_title = gca.title.get_text() if gca.title else None + + # remove borders and size, we should let the web take care of that + if plotly_fig: + PatchedMatplotlib._global_plot_counter += 1 + title = plot_title or 'untitled %d' % PatchedMatplotlib._global_plot_counter + plotly_fig.layout.margin = {} + plotly_fig.layout.autosize = True + plotly_fig.layout.height = None + plotly_fig.layout.width = None + # send the plot event + reporter.report_plot(title=title, series='plot', plot=plotly_fig.to_plotly_json(), + iter=PatchedMatplotlib._global_plot_counter if plot_title else 0) + else: + # send the plot as image + PatchedMatplotlib._global_image_counter += 1 + logger = PatchedMatplotlib._current_task.get_logger() + title = plot_title or 'untitled %d' % PatchedMatplotlib._global_image_counter + logger.report_image_and_upload(title=title, series='plot image', matrix=image, + iteration=PatchedMatplotlib._global_image_counter + if plot_title else 0) + except Exception: + # plotly failed + pass + + return + + @staticmethod + def ipython_post_execute_hook(): + # noinspection PyBroadException + try: + from matplotlib import _pylab_helpers + for i, f_mgr in enumerate(_pylab_helpers.Gcf.get_all_fig_managers()): + if not f_mgr.canvas.figure.stale: + PatchedMatplotlib._report_figure(stored_figure=f_mgr) + except Exception: + pass diff --git a/trains/utilities/plotly.py b/trains/utilities/plotly.py new file mode 100644 index 00000000..ef454504 --- /dev/null +++ b/trains/utilities/plotly.py @@ -0,0 +1,353 @@ +import numpy as np +from attr import attrs, attrib + + +def create_2d_histogram_plot(np_row_wise, labels, title=None, xtitle=None, ytitle=None, series=None, xlabels=None, + comment=None): + """ + Create a 2D Plotly histogram chart from a 2D numpy array + :param np_row_wise: 2D numpy data array + :param labels: Histogram labels + :param title: Chart title + :param xtitle: X-Series title + :param ytitle: Y-Series title + :param comment: comment underneath the title + :return: Plotly chart dict + """ + np_row_wise = np.atleast_2d(np_row_wise) + assert len(np_row_wise.shape) == 2, "Expected a 2D numpy array" + # using labels without xlabels leads to original behavior + if labels is not None and xlabels is None: + assert len(labels) == np_row_wise.shape[0], "Please provide a label for each data row" + elif xlabels is None: + fake_label = series or '' + labels = [fake_label] * np_row_wise.shape[0] + elif labels: + if len(labels) == 1: + labels = [labels] * np_row_wise.shape[0] + assert len(xlabels) == np_row_wise.shape[1] + + data = [_np_row_to_plotly_data_item(np_row=np_row_wise[i, :], label=labels[i] if labels else None, xlabels=xlabels) + for i in range(np_row_wise.shape[0])] + return _plotly_hist_dict(title=title, xtitle=xtitle, ytitle=ytitle, data=data, comment=comment) + + +def _to_np_array(value): + if not isinstance(value, np.ndarray): + value = np.array(value) + + return value + + +@attrs +class SeriesInfo(object): + name = attrib(type=str) + data = attrib(type=np.ndarray, converter=_to_np_array) + labels = attrib(default=None) + + @data.validator + def _validate_data(self, _, value): + if value.ndim != 2: + raise ValueError("Expected series data to be 2D numpy array") + + if value.shape[1] != 2: + raise 
ValueError("Expected series data to have 2 columns") + + def __attrs_post_init__(self): + if (self.labels is not None) and (len(self.labels) != self.data.shape[0]): + raise ValueError( + "If 'labels' is provided, it must be a list or tuple, " + "the same length as the data" + ) + + +def create_line_plot(title, series, xtitle, ytitle, mode='lines', reverse_xaxis=False, comment=None): + plotly_obj = _plotly_scatter_layout_dict( + title=title if not comment else (title + '
' + comment + ''), + xaxis_title=xtitle, + yaxis_title=ytitle, + ) + + if reverse_xaxis: + plotly_obj["layout"]["xaxis"]["autorange"] = "reversed" + + plotly_obj["data"].extend({ + "name": s.name, + "x": s.data[:, 0].tolist(), + "y": s.data[:, 1].tolist(), + "mode": mode, + "text": s.labels, + "type": "scatter", + } for s in series) + + return plotly_obj + + +def create_2d_scatter_series(np_row_wise, title="Scatter", series_name="Series", xtitle="x", ytitle="y", mode="lines", + labels=None, comment=None): + """ + Create a 2D scatter Plotly graph from a 2 column numpy array + :param np_row_wise: 2 column numpy data array [(x0,y0), (x1,y1) ...] + :param title: Chart title + :param series_name: Series name + :param xtitle: X-axis title + :param ytitle: Y-axis title + :param mode: scatter type mode ('lines' / 'markers' / 'lines+markers') + :param labels: label (text) per point on the scatter graph + :param comment: comment underneath the title + :return: Plotly chart dict + :return: + """ + plotly_obj = _plotly_scatter_layout_dict(title=title, xaxis_title=xtitle, yaxis_title=ytitle, comment=comment) + assert np_row_wise.ndim == 2, "Expected a 2D numpy array" + assert np_row_wise.shape[1] == 2, "Expected two columns X/Y e.g. [(x0,y0), (x1,y1) ...]" + + this_scatter_data = { + "name": series_name, + "x": np_row_wise[:, 0].tolist(), + "y": np_row_wise[:, 1].tolist(), + "mode": mode, + "text": labels, + "type": "scatter" + } + plotly_obj["data"].append(this_scatter_data) + return plotly_obj + + +def create_3d_scatter_series(np_row_wise, title="Scatter", series_name="Series", xtitle="x", ytitle="y", ztitle="z", + mode="lines", color=((217, 217, 217, 0.14),), marker_size=5, line_width=0.8, + labels=None, fill_axis=-1, plotly_obj=None): + """ + Create a 3D scatter Plotly graph from a 3 column numpy array + :param np_row_wise: 3 column numpy data array [(x0,y0,z0), (x1,y1,z1) ...] + :param title: Chart title + :param series_name: Series name + :param xtitle: X-axis title + :param ytitle: Y-axis title + :param ztitle: Z-axis title + :param labels: label (text) per point on the scatter graph + :param fill_axis: fill area under the curve + :return: Plotly chart dict + :return: + """ + if not plotly_obj: + plotly_obj = plotly_scatter3d_layout_dict(title=title, xaxis_title=xtitle, yaxis_title=ytitle, zaxis_title=ztitle) + assert np_row_wise.ndim == 2, "Expected a 2D numpy array" + assert np_row_wise.shape[1] == 3, "Expected three columns X/Y/Z e.g. [(x0,y0,z0), (x1,y1,z1) ...]" + + c = color[0] + c = (int(c[0]), int(c[1]), int(c[2]), float(c[3])) + this_scatter_data = { + "name": series_name, + "x": np_row_wise[:, 0].tolist(), + "y": np_row_wise[:, 1].tolist(), + "z": np_row_wise[:, 2].tolist(), + "text": labels, + "type": "scatter3d", + "mode": mode, + 'marker': { + 'size': marker_size, + 'line': { + 'color': 'rgba(%d, %d, %d, %f.2)' % (c[0], c[1], c[2], c[3]), + 'width': line_width + }, + 'opacity': 0.8 + }, + } + plotly_obj["data"].append(this_scatter_data) + return plotly_obj + + +def create_value_matrix(np_value_matrix, title="Heatmap Matrix", xlabels=None, ylabels=None, xtitle="X", ytitle="Y", + custom_colors=True, series=None, comment=None): + conf_matrix_plot = { + "data": [ + { + "x": xlabels, + "y": ylabels, + "z": np_value_matrix.tolist(), + "type": "heatmap" + } + ], + "layout": { + "showlegend": True, + "title": title if not comment else (title + '
' + comment + ''), + + "xaxis": { + "title": xtitle, + }, + "yaxis": { + "title": ytitle + }, + "name": series, + } + } + + if custom_colors: + scale, bar = _get_z_colorbar_data() + conf_matrix_plot["data"][0].update({"colorscale": scale}) + conf_matrix_plot["data"][0].update({"colorbar": bar}) + + return conf_matrix_plot + + +def create_3d_surface(np_value_matrix, title="3D Surface", xlabels=None, ylabels=None, xtitle="X", ytitle="Y", + ztitle="Z", custom_colors=True, series=None, camera=None, comment=None): + conf_matrix_plot = { + "data": [ + { + "z": np_value_matrix.tolist(), + "type": "surface", + "contours": { + "y": { + "show": False, + "highlightcolor": "#fff4ff", + "project": {"y": True} + } + }, + "showscale": False, + } + ], + "layout": { + "scene": { + "xaxis": { + "title": xtitle, + "showgrid": False, + "nticks": 10, + "ticktext": xlabels, + "tickvals": list(range(len(xlabels))) if xlabels else None, + }, + "yaxis": { + "title": ytitle, + "showgrid": False, + "nticks": 10, + "ticktext": ylabels, + "tickvals": list(range(len(ylabels))) if ylabels else ylabels, + }, + "zaxis": { + "title": ztitle, + "nticks": 5, + }, + }, + "showlegend": False, + "title": title if not comment else (title + '
' + comment + ''), + "name": series, + } + } + if camera: + conf_matrix_plot['layout']['scene']['camera'] = {"eye": {"x": camera[0], "y": camera[1], "z": camera[2]}} + + if custom_colors: + scale, bar = _get_z_colorbar_data() + conf_matrix_plot["data"][0].update({"colorscale": scale}) + conf_matrix_plot["data"][0].update({"colorbar": bar}) + + return conf_matrix_plot + + +def _get_z_colorbar_data(z_data=None, values=None, colors=None): + if values is None: + values = [0, 1. / 10, 2. / 10, 6. / 10, 9. / 10] + if colors is None: + colors = [(71, 17, 100), (53, 92, 140), (37, 139, 141), (66, 189, 112), (141, 314, 68), (221, 226, 24)] + if z_data is not None: + data = np.array(z_data) + max_z = data.max() + scaler = max_z + values = [float(v * scaler) for v in values[0:5]] + values.append(1.0) # poltly quirk? + # we do not want to show the first and last value + tickvalues = [" %.3f " % v for v in values[1:]] + tickvalues = [float(v) for v in tickvalues] + # tickvalues.pop() + colorscale = [[v, 'rgb' + str(color)] for v, color in zip(values, colors)] + colorbar = {"tick0": 0, "tickmode": "array", "tickvals": tickvalues} + + return colorscale, colorbar + + +def _plotly_hist_dict(title, xtitle, ytitle, data=None, comment=None): + """ + Create a basic Plotly chart dictionary + :param title: Chart title + :param xtitle: X-Series title + :param ytitle: Y-Series title + :param data: Data items + :type data: list + :return: Plotly chart dict + """ + return { + "data": data or [], + "layout": { + "title": title if not comment else (title + '
' + comment + ''), + "xaxis": { + "title": xtitle + }, + "yaxis": { + "title": ytitle + }, + "barmode": "stack", + "bargap": 0.08, + "bargroupgap": 0 + } + } + + +def _np_row_to_plotly_data_item(np_row, label, xlabels=None): + """ + Convert a numpy data row into a Plotly chart data item + :param np_row: numpy 1D data row + :param label: Item label + :return: Plotly data item dict + """ + bins = list(range(np_row.shape[0])) if xlabels is None else list(xlabels) + # mylabels = ['"' + label + '"'] * len(bins) + this_trace_data = { + "name": label, + "y": np_row.tolist(), + "x": bins, + # "text": mylabels, + "type": "bar" + } + return this_trace_data + + +def _plotly_scatter_layout_dict(title="Scatter", xaxis_title="X", yaxis_title="Y", series=None, comment=None): + return { + "data": [], + "layout": { + "title": title if not comment else (title + '
' + comment + ''), + "xaxis": { + "title": xaxis_title, + "showspikes": True, + "spikethickness": 1, + "spikesnap": "cursor", + "spikemode": "toaxis+across", + }, + "yaxis": { + "title": yaxis_title, + "showspikes": True, + "spikethickness": 1, + "spikesnap": "cursor", + "spikemode": "toaxis+across", + }, + "name": series, + } + } + + +def plotly_scatter3d_layout_dict(title="Scatter", xaxis_title="X", yaxis_title="Y", zaxis_title="Z", + series=None, show_legend=True, comment=None): + return { + "data": [], + "layout": { + "showlegend": show_legend, + "title": title if not comment else (title + '
' + comment + ''), + "scene": { + 'xaxis': {'title': xaxis_title}, + 'yaxis': {'title': yaxis_title}, + 'zaxis': {'title': zaxis_title}, + }, + "name": series, + } + } diff --git a/trains/utilities/py3_interop.py b/trains/utilities/py3_interop.py new file mode 100644 index 00000000..83596a62 --- /dev/null +++ b/trains/utilities/py3_interop.py @@ -0,0 +1,39 @@ +""" Convenience classes supporting python3-like concepts """ +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class AbstractContextManager(object): + """An abstract base class for context managers. Supported in contextlib from python 3.6 and up """ + + def __enter__(self): + """Return `self` upon entering the runtime context.""" + return self + + @abc.abstractmethod + def __exit__(self, exc_type, exc_value, traceback): + """Raise any exception triggered within the runtime context.""" + return None + + @classmethod + def __subclasshook__(cls, C): + if cls is AbstractContextManager: + if (any("__enter__" in B.__dict__ for B in C.__mro__) and any("__exit__" in B.__dict__ for B in C.__mro__)): + return True + return NotImplemented + + +try: + + from abc import abstractclassmethod + +except ImportError: + + class abstractclassmethod(classmethod): + __isabstractmethod__ = True + + def __init__(self, callable): + callable.__isabstractmethod__ = True + super(abstractclassmethod, self).__init__(callable) diff --git a/trains/utilities/seed.py b/trains/utilities/seed.py new file mode 100644 index 00000000..fff6c169 --- /dev/null +++ b/trains/utilities/seed.py @@ -0,0 +1,76 @@ +import sys +import random + +try: + import numpy as np +except Exception: + np = None +try: + import cv2 +except Exception: + cv2 = None + + +def make_deterministic(seed=1337, cudnn_deterministic=False): + """ + Ensure deterministic behavior across PyTorch using the provided random seed. + This function makes sure that torch, numpy and random use the same random seed. + + When using trains's task, call this function using the task's random seed like so: + make_deterministic(task.get_random_seed()) + + :param int seed: Seed number + :param bool cudnn_deterministic: In order to make computations deterministic on your specific platform + and PyTorch release, set this value to True. torch will only allow those CuDNN algorithms that are + (believed to be) deterministic. This can have a performance impact (slower execution) depending on your model. + """ + seed = int(seed) & 0xFFFFFFFF + torch = sys.modules.get("torch") + tf = sys.modules.get("tensorflow") + + if cudnn_deterministic: + try: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + except Exception: + pass + + random.seed(seed) + + if np is not None: + np.random.seed(seed) + + if cv2 is not None: + try: + cv2.setRNGSeed(seed) + except Exception: + pass + + if torch is not None: + try: + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + except Exception: + pass + + if tf is not None: + # reset graph state + try: + import tensorflow + from tensorflow.python.eager.context import _context + eager_mode_bypass = _context is None + except Exception: + eager_mode_bypass = False + + if not eager_mode_bypass: + try: + tf.set_random_seed(seed) + except Exception: + pass + try: + tf.random.set_random_seed(seed) + except Exception: + pass + + +make_deterministic() diff --git a/trains/version.py b/trains/version.py new file mode 100644 index 00000000..a0129fb6 --- /dev/null +++ b/trains/version.py @@ -0,0 +1 @@ +__version__ = '0.9.0rc2'
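
Note on the patching convention: every PatchKerasModelIO / PatchTensorflowModelIO / PatchPyTorchModelIO method above receives the wrapped callable as its first argument via `_patched_call`, a helper defined earlier in `trains/utilities/frameworks.py` (outside this hunk). The following is only a minimal sketch of that calling convention, not the real helper (which presumably also guards against double patching); the names `_patched_call_sketch`, `_log_and_call` and `greet` are illustrative.

# Sketch of the _patched_call convention assumed by the patched methods above.
def _patched_call_sketch(original_fn, patched_fn):
    def _inner(*args, **kwargs):
        # the patched function always gets the original callable as its first argument,
        # e.g. PatchPyTorchModelIO._save(original_fn, obj, f, *args, **kwargs)
        return patched_fn(original_fn, *args, **kwargs)
    return _inner

def _log_and_call(original_fn, *args, **kwargs):
    print('intercepted call')
    return original_fn(*args, **kwargs)

def greet(name):
    return 'hello %s' % name

greet = _patched_call_sketch(greet, _log_and_call)
print(greet('trains'))  # prints 'intercepted call', then 'hello trains'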
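Once `update_current_task()` has been called for a task, a training script needs no extra code for its checkpoints to be tracked: `torch.save` / `torch.load` (and the Keras/TensorFlow equivalents) are already wrapped. A rough usage sketch, assuming `Task.init()` from `trains/task.py` wires up these patch classes and that `torch` is installed; the project and task names are hypothetical.

# Usage sketch only; assumes Task.init() binds PatchPyTorchModelIO to the new task.
import torch
from trains import Task

task = Task.init(project_name='examples', task_name='pytorch checkpoint demo')

model = torch.nn.Linear(4, 2)
# torch.save is wrapped by PatchPyTorchModelIO._save, so this call also registers an
# OutputModel on the task, named after the file stem ('model').
torch.save(model.state_dict(), 'model.pt')

# torch.load is wrapped by PatchPyTorchModelIO._load; when running remotely the weights
# file may be swapped for the one registered on the task before loading.
state_dict = torch.load('model.pt')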
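`PatchedMatplotlib` replaces `pyplot.show` so that each active figure is either converted to a Plotly chart (via `plotly.matplotlylib`) or rasterized to a PNG and reported as an image, before the original `show()` runs. A minimal sketch of what a user-facing script would see, assuming a task is already initialized and matplotlib is installed:

import matplotlib
matplotlib.use('Agg')  # headless backend, mirroring the running_remotely() branch above
import matplotlib.pyplot as plt

# After PatchedMatplotlib.update_current_task(task) has run, plt.show() is
# PatchedMatplotlib.patched_show: it calls _report_figure() first, which converts the
# active figure to Plotly (or a PNG fallback) and sends it via the task reporter,
# then falls through to the original show().
plt.plot([0, 1, 2], [0, 1, 4])
plt.title('loss curve')  # used as the report title; otherwise 'untitled N' is used
plt.show()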
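The helpers in `trains/utilities/plotly.py` return plain Plotly-compatible dicts rather than plotly graph objects, so they can be passed straight to the reporter. A small sketch of calling two of them with the signatures shown above (assumes the package is importable as `trains.utilities.plotly` and numpy/attrs are installed):

import numpy as np

from trains.utilities.plotly import SeriesInfo, create_2d_histogram_plot, create_line_plot

# one line series: rows of (x, y) pairs, as the SeriesInfo validator expects (N x 2 array)
series = [SeriesInfo(name='train loss', data=np.array([[0, 1.0], [1, 0.5], [2, 0.3]]))]
line_chart = create_line_plot(title='loss', series=series, xtitle='iteration', ytitle='loss')

# two histogram rows, one label per row (xlabels omitted, so bins default to indices)
hist_chart = create_2d_histogram_plot(
    np_row_wise=np.array([[1, 2, 3], [3, 2, 1]]),
    labels=['a', 'b'],
    title='histogram', xtitle='bucket', ytitle='count',
)
print(sorted(line_chart['layout'].keys()))  # both charts are plain dicts with 'data' and 'layout'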
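`make_deterministic()` runs once at import time with its default seed (1337); the docstring also suggests re-seeding from the task so that remote re-runs reproduce the same randomness. A short sketch of that pattern, assuming `Task.init()` and the `task.get_random_seed()` accessor referenced in the docstring:

from trains import Task
from trains.utilities.seed import make_deterministic

task = Task.init(project_name='examples', task_name='deterministic run')  # hypothetical names

# re-seed random/numpy/cv2/torch/tensorflow with the task's seed, as the docstring suggests;
# pass cudnn_deterministic=True to also restrict CuDNN to deterministic algorithms (slower).
make_deterministic(task.get_random_seed(), cudnn_deterministic=False)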