From b4cb27b27d98850f5a7be3187b83a38ef619defe Mon Sep 17 00:00:00 2001 From: allegroai Date: Sun, 6 Mar 2022 01:25:56 +0200 Subject: [PATCH 01/19] ClearML-Serving v2 initial working commit --- README.md | 356 ++- clearml_serving/__main__.py | 556 +++-- clearml_serving/engines/__init__.py | 0 clearml_serving/engines/triton/Dockerfile | 22 + clearml_serving/engines/triton/__init__.py | 0 clearml_serving/engines/triton/entrypoint.sh | 18 + .../engines/triton/requirements.txt | 6 + .../engines/triton/triton_helper.py | 508 +++++ clearml_serving/preprocess/example.py | 40 + clearml_serving/service.py | 17 - clearml_serving/serving/Dockerfile | 21 + clearml_serving/serving/__init__.py | 0 clearml_serving/serving/entrypoint.sh | 28 + clearml_serving/serving/main.py | 95 + .../serving/model_request_processor.py | 901 ++++++++ clearml_serving/serving/preprocess_service.py | 356 +++ clearml_serving/serving/requirements.txt | 14 + clearml_serving/serving_service.py | 589 ----- clearml_serving/triton_helper.py | 219 -- clearml_serving/version.py | 2 +- docs/design_diagram.png | Bin 0 -> 340024 bytes ...6565642f4d4e4953545f64696769742e706e67.png | Bin 3225 -> 0 bytes .../client.py | 88 - .../http_triton.py | 1970 ----------------- .../sample_image.webp | Bin 27042 -> 0 bytes examples/keras/preprocess.py | 36 + examples/keras/readme.md | 46 + examples/keras/requirements.txt | 1 + .../{keras_mnist.py => train_keras_mnist.py} | 43 +- examples/lightgbm/preprocess.py | 23 + examples/lightgbm/readme.md | 42 + examples/lightgbm/train_model.py | 22 + examples/pytorch/preprocess.py | 35 + examples/pytorch/readme.md | 49 + examples/pytorch/requirements.txt | 5 + examples/pytorch/train_pytorch_mnist.py | 142 ++ examples/sklearn/preprocess.py | 19 + examples/sklearn/readme.md | 39 + examples/sklearn/train_model.py | 15 + examples/xgboost/preprocess.py | 21 + examples/xgboost/readme.md | 40 + examples/xgboost/train_model.py | 28 + requirements.txt | 2 +- 43 files changed, 3236 
insertions(+), 3178 deletions(-) create mode 100644 clearml_serving/engines/__init__.py create mode 100644 clearml_serving/engines/triton/Dockerfile create mode 100644 clearml_serving/engines/triton/__init__.py create mode 100755 clearml_serving/engines/triton/entrypoint.sh create mode 100644 clearml_serving/engines/triton/requirements.txt create mode 100644 clearml_serving/engines/triton/triton_helper.py create mode 100644 clearml_serving/preprocess/example.py delete mode 100644 clearml_serving/service.py create mode 100644 clearml_serving/serving/Dockerfile create mode 100644 clearml_serving/serving/__init__.py create mode 100755 clearml_serving/serving/entrypoint.sh create mode 100644 clearml_serving/serving/main.py create mode 100644 clearml_serving/serving/model_request_processor.py create mode 100644 clearml_serving/serving/preprocess_service.py create mode 100644 clearml_serving/serving/requirements.txt delete mode 100644 clearml_serving/serving_service.py delete mode 100644 clearml_serving/triton_helper.py create mode 100644 docs/design_diagram.png delete mode 100644 examples/clearml_serving_simple_http_inference_request/68747470733a2f2f646174616d61646e6573732e6769746875622e696f2f6173736574732f696d616765732f74665f66696c655f666565642f4d4e4953545f64696769742e706e67.png delete mode 100644 examples/clearml_serving_simple_http_inference_request/client.py delete mode 100644 examples/clearml_serving_simple_http_inference_request/http_triton.py delete mode 100644 examples/clearml_serving_simple_http_inference_request/sample_image.webp create mode 100644 examples/keras/preprocess.py create mode 100644 examples/keras/readme.md rename examples/keras/{keras_mnist.py => train_keras_mnist.py} (75%) create mode 100644 examples/lightgbm/preprocess.py create mode 100644 examples/lightgbm/readme.md create mode 100644 examples/lightgbm/train_model.py create mode 100644 examples/pytorch/preprocess.py create mode 100644 examples/pytorch/readme.md create mode 100644 
examples/pytorch/requirements.txt create mode 100644 examples/pytorch/train_pytorch_mnist.py create mode 100644 examples/sklearn/preprocess.py create mode 100644 examples/sklearn/readme.md create mode 100644 examples/sklearn/train_model.py create mode 100644 examples/xgboost/preprocess.py create mode 100644 examples/xgboost/readme.md create mode 100644 examples/xgboost/train_model.py diff --git a/README.md b/README.md index 8a0a36e..f325a4c 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,9 @@ -**ClearML Serving - ML-Ops made easy** +**ClearML Serving - Model deployment made easy** -## **`clearml-serving`
Model-Serving Orchestration and Repository Solution** +## **`clearml-serving`
Model Serving (ML/DL), Orchestration and Repository Made Easy** [![GitHub license](https://img.shields.io/github/license/allegroai/clearml-serving.svg)](https://img.shields.io/github/license/allegroai/clearml-serving.svg) @@ -17,128 +17,276 @@ - - -**`clearml-serving`** is a command line utility for the flexible orchestration of your model deployment. -**`clearml-serving`** can make use of a variety of serving engines (**Nvidia Triton, OpenVino Model Serving, KFServing**) -setting them up for serving wherever you designate a ClearML Agent or on your ClearML Kubernetes cluster +**`clearml-serving`** is a command line utility for model deployment and orchestration. +It enables model deployment including serving and preprocessing code to a Kubernetes cluster or custom container based solution. Features: -* Spin serving engines on your Kubernetes cluster or ClearML Agent machine from CLI -* Full usage & performance metrics integrated with ClearML UI -* Multi-model support in a single serving engine container -* Automatically deploy new model versions -* Support Canary model releases -* Integrates to ClearML Model Repository -* Deploy & upgrade endpoints directly from ClearML UI -* Programmatic interface for endpoint/versions/metric control +* Easy to deploy & configure + * Support Machine Learning Models (Scikit Learn, XGBoost, LightGBM) + * Support Deep Learning Models (Tensorflow, PyTorch, ONNX) + * Customizable RestAPI for serving (i.e. allow per model pre/post-processing for easy integration) +* Flexibility + * On-line model deployment + * On-line endpoint model/version deployment (i.e. 
no need to take the service down) + * Per model standalone preprocessing and postprocessing python code +* Scalability + * Multi model per container + * Multi models per serving service + * Multi-service support (fully seperated multiple serving service running independently) + * Multi cluster support + * Out-of-the-box node auto-scaling based on load/usage +* Efficiency + * multi-container resource utilization + * Support for CPU & GPU nodes + * Auto-batching for DL models +* Automatic deployment + * Automatic model upgrades w/ canary support + * Programmable API for model deployment +* Canary A/B deployment + * Online Canary updates +* Model Monitoring + * Usage Metric reporting + * Metric Dashboard + * Model performance metric + * Model performance Dashboard + +## ClearML Serving Design + +### ClearML Serving Design Principles + +* Modular +* Scalable +* Flexible +* Customizable +* Open Source + + + +## Installation + +### Concepts + +CLI - Secure configuration interface for on-line model upgrade/deployment on running Serving Services +Serving Service Task - Control plane object storing configuration on all the endpoints. Support multiple separated instance, deployed on multiple clusters. +Inference Services - Inference containers, performing model serving pre/post processing. Also support CPU model inferencing. +Serving Engine Services - Inference engine containers (e.g. Nvidia Triton, TorchServe etc.) used by the Inference Services for heavier model inference. +Statistics Service - Single instance per Serving Service collecting and broadcasting model serving & performance statistics +Time-series DB - Statistics collection service used by the Statistics Service, e.g. Prometheus +Dashboard Service - Customizable dashboard-ing solution on top of the collected statistics, e.g. 
Grafana + +### prerequisites + +* ClearML-Server : Model repository, Service Health, Control plane +* Kubernetes / Single-instance VM : Deploying containers +* CLI : Configuration & model deployment interface -## Installing ClearML Serving +### Initial Setup 1. Setup your [**ClearML Server**](https://github.com/allegroai/clearml-server) or use the [Free tier Hosting](https://app.community.clear.ml) -2. Connect your ClearML Worker(s) to your **ClearML Server** (see [**ClearML Agent**](https://github.com/allegroai/clearml-agent) / [Kubernetes integration](https://github.com/allegroai/clearml-agent#kubernetes-integration-optional)) -3. Install `clearml-serving` (Note: `clearml-serving` is merely a control utility, it does not require any resources for actual serving) +2. Install the CLI on your laptop `clearml` and `clearml-serving` + - `pip3 install https://github.com/allegroai/clearml-serving.git@dev` + - Make sure to configure your machine to connect to your `clearml-server` see [clearml-init](https://clear.ml/docs/latest/docs/getting_started/ds/ds_first_steps#install-clearml) for details +3. Create the Serving Service Controller + - `clearml-serving create --name "serving example"` + - The new serving service UID should be printed `"New Serving Service created: id=aa11bb22aa11bb22` +4. Write down the Serving Service UID + +### Toy model (scikit learn) deployment example + +1. Train toy scikit-learn model + - create new python virtual environment + - `pip3 install -r examples/sklearn/requirements.txt` + - `python3 examples/sklearn/train_model.py` + - Model was automatically registered and uploaded into the model repository. For Manual model registration see [here](#registering--deploying-new-models-manually) +2. 
Register the new Model on the Serving Service + - `clearml-serving --id model add --engine sklearn --endpoint "test_model_sklearn" --preprocess "examples/sklearn/preprocess.py" --name "train sklearn model" --project "serving examples"` + - **Notice** the preprocessing python code is packaged and uploaded to the "Serving Service", to be used by any inference container, and downloaded in realtime when updated +3. Spin the Inference Container + - Customize container [Dockerfile](clearml_serving/serving/Dockerfile) if needed + - Build container `docker build --tag clearml-serving-inference:latest -f clearml_serving/serving/Dockerfile .` + - Spin the inference container: `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= -e CLEARML_SERVING_POLL_FREQ=5 clearml-serving-inference:latest` +4. Test new model inference endpoint + - `curl -X POST "http://127.0.0.1:8080/serve/test_model_sklearn" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` + +**Notice**, now that we have an inference container running, we can add new model inference endpoints directly with the CLI. The inference container will automatically sync once every 5 minutes. + +**Notice** On the first few requests the inference container needs to download the model file and preprocessing python code, this means the request might take a little longer, once everything is cached, it will return almost immediately. + +**Notes:** +> To review the model repository in the ClearML web UI, under the "serving examples" Project on your ClearML account/server ([free hosted](https://app.clear.ml) or [self-deployed](https://github.com/allegroai/clearml-server)). 
+ +> Inference services status, console outputs and machine metrics are available in the ClearML UI in the Serving Service project (default: "DevOps" project) + +> To learn more on training models and the ClearML model repository, see the [ClearML documentation](https://clear.ml/docs) + + +### Nvidia Triton serving engine setup + +Nvidia Triton Serving Engine is used by clearml-serving to do the heavy lifting of deep-learning models on both GPU & CPU nodes. +Inside the Triton container a clearml controller is spinning and monitoring the Triton server. +All the triton models are automatically downloaded into the triton container in real-time, configured, and served. +A single Triton serving container is serving multiple models, based on the registered models on the Serving Service +Communication from the Inference container to the Triton container is done transparently over compressed gRPC channel. + +#### setup + +Optional: build the Triton container + - Customize container [Dockerfile](clearml_serving/engines/triton/Dockerfile) + - Build container `docker build --tag clearml-serving-triton:latest -f clearml_serving/engines/triton/Dockerfile .` + +Spin the triton engine container: `docker run -v ~/clearml.conf:/root/clearml.conf -p 8001:8001 -e CLEARML_SERVING_TASK_ID= -e CLEARML_TRITON_POLL_FREQ=5 -e CLEARML_TRITON_METRIC_FREQ=1 clearml-serving-triton:latest` + +Configure the "Serving Service" with the new Triton Engine gRPC IP:Port. 
Notice that when deploying on a Kubernetes cluster this should be a TCP ingest endpoint, to allow for transparent auto-scaling of the Triton Engine Containers + +`clearml-serving --id config --triton-grpc-server :8001` + +Spin the inference service (this is the external RestAPI interface) +`docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= -e CLEARML_SERVING_POLL_FREQ=5 clearml-serving-inference:latest` + +Now eny model that will register with "Triton" engine, will run the pre/post processing code on the Inference service container, and the model inference itself will be executed on the Triton Engine container. +See Tensorflow [example](examples/keras/readme.md) and Pytorch [example](examples/pytorch/readme.md) for further details. + + +### Container Configuration Variables + +When spinning the Inference container or the Triton Engine container, +we need to specify the `clearml-server` address and access credentials +One way of achieving that is by mounting the `clearml.conf` file into the container's HOME folder (i.e. `-v ~/clearml.conf:/root/clearml.conf`) +We can also pass environment variables instead (see [details](https://clear.ml/docs/latest/docs/configs/env_vars#server-connection): ```bash -pip install clearml-serving +CLEARML_API_HOST="https://api.clear.ml" +CLEARML_WEB_HOST="https://app.clear.ml" +CLEARML_FILES_HOST="https://files.clear.ml" +CLEARML_API_ACCESS_KEY="access_key_here" +CLEARML_API_SECRET_KEY="secret_key_here" ``` -## Using ClearML Serving +To access models stored on an S3 buckets, Google Storage or Azure blob storage (notice that with GS you also need to make sure the access json is available inside the containers). 
See further details on configuring the storage access [here](https://clear.ml/docs/latest/docs/integrations/storage#configuring-storage) -Clearml-Serving will automatically serve *published* models from your ClearML model repository, so the first step is getting a model into your ClearML model repository. -Background: When using `clearml` in your training code, any model stored by your python code is automatically registered (and, optionally, uploaded) to the model repository. This auto-magic logging is key for continuous model deployment. -To learn more on training models and the ClearML model repository, see the [ClearML documentation](https://clear.ml/docs/latest/docs/) - -### Training a toy model with Keras (about 2 minutes on a laptop) - -The main goal of `clearml-serving` is to seamlessly integrate with the development process and the model repository. -This is achieved by combining ClearML's auto-magic logging which creates and uploads models directly from -the python training code, with accessing these models as they are automatically added into the model repository using the ClearML Server's REST API and its pythonic interface. -Let's demonstrate this seamless integration by training a toy Keras model to classify images based on the MNIST dataset. -Once we have a trained model in the model repository we will serve it using `clearml-serving`. - -We'll also see how we can retrain another version of the model, and have the model serving engine automatically upgrade to the new model version. - -#### Keras mnist toy train example (single epoch mock training): - -1. install `tensorflow` (and of course `cleamrl`) - ```bash - pip install "tensorflow>2" clearml - ``` - -2. 
Execute the training code - ```bash - cd examples/keras - python keras_mnist.py - ``` - **Notice:** The only required integration code with `clearml` are the following two lines: - ```python - from clearml import Task - task = Task.init(project_name="examples", task_name="Keras MNIST serve example", output_uri=True) - ``` - This call will make sure all outputs are automatically logged to the ClearML Server, this includes: console, Tensorboard, cmdline arguments, git repo etc. - It also means any model stored by the code will be automatically uploaded and logged in the ClearML model repository. - - -3. Review the models in the ClearML web UI: - Go to the "Projects" section of your ClearML server ([free hosted](https://app.community.clear.ml) or [self-deployed](https://github.com/allegroai/clearml-server)). - in the "examples" project, go to the Models tab (model repository). - We should have a model named "Keras MNIST serve example - serving_model". - Once a model-serving service is available, Right-clicking on the model and selecting "Publish" will trigger upgrading the model on the serving engine container. - -Next we will spin the Serving Service and the serving-engine - -### Serving your models - -In order to serve your models, `clearml-serving` will spawn a serving service which stores multiple endpoints and their configuration, -collects metric reports, and updates models when new versions are published in the model repository. -In addition, a serving engine is launched, which is the container actually running the inference engine. -(Currently supported engines are Nvidia-Triton, coming soon are Intel OpenVIno serving-engine and KFServing) - -Now that we have a published model in the ClearML model repository, we can spin a serving service and a serving engine. - -Starting a Serving Service: - -1. Create a new serving instance. - This is the control plane Task, we will see all its configuration logs and metrics in the "serving" project. 
We can have multiple serving services running in the same system. - In this example we will make use of Nvidia-Triton engines. ```bash -clearml-serving triton --project "serving" --name "serving example" -``` -2. Add models to the serving engine with specific endpoints. -Reminder: to view your model repository, login to your ClearML account, - go to "examples" project and review the "Models" Tab -```bash -clearml-serving triton --endpoint "keras_mnist" --model-project "examples" --model-name "Keras MNIST serve example - serving_model" +AWS_ACCESS_KEY_ID +AWS_SECRET_ACCESS_KEY +AWS_DEFAULT_REGION + +GOOGLE_APPLICATION_CREDENTIALS + +AZURE_STORAGE_ACCOUNT +AZURE_STORAGE_KEY ``` -3. Launch the serving service. - The service will be launched on your "services" queue, which by default runs services on the ClearML server machine. - (Read more on services queue [here](https://clear.ml/docs/latest/docs/clearml_agent#services-mode)) - We set our serving-engine to launch on the "default" queue, +### Registering & Deploying new models manually + +Uploading an existing model file into the model repository can be done via the `clearml` RestAPI, the python interface, or with the `clearml-serving` CLI + +> To learn more on training models and the ClearML model repository, see the [ClearML documentation](https://clear.ml/docs) + +- local model file on our laptop: 'examples/sklearn/sklearn-model.pkl' +- Upload the model file to the `clearml-server` file storage and register it +`clearml-serving --id model upload --name "manual sklearn model" --project "serving examples" --framework "scikit-learn" --path examples/sklearn/sklearn-model.pkl` +- We now have a new Model in the "serving examples" project, by the name of "manual sklearn model". The CLI output prints the UID of the newly created model, we will use it to register a new endpoint +- In the `clearml` web UI we can see the new model listed under the `Models` tab in the associated project. 
we can also download the model file itself directly from the web UI +- Register a new endpoint with the new model +`clearml-serving --id model add --engine sklearn --endpoint "test_model_sklearn" --preprocess "examples/sklearn/preprocess.py" --model-id ` + +**Notice** we can also provide a different storage destination for the model, such as S3/GS/Azure, by passing +`--destination="s3://bucket/folder"`, `gs://bucket/folder`, `azure://bucket/folder`. There is no need to provide a unique path to the destination argument, the location of the model will be a unique path based on the serving service ID and the model name + + +### Automatic model deployment + +The clearml Serving Service supports automatic model deployment and upgrades, directly connected with the model repository and API. When the model auto-deploy is configured, new model versions will be automatically deployed when you "publish" or "tag" a new model in the `clearml` model repository. This automation interface allows for simpler CI/CD model deployment process, as a single API automatically deploys (or removes) a model from the Serving Service. + +#### automatic model deployment example + +1. Configure the model auto-update on the Serving Service +- `clearml-serving --id model auto-update --engine sklearn --endpoint "test_model_sklearn_auto" --preprocess "preprocess.py" --name "train sklearn model" --project "serving examples" --max-versions 2` +2. Deploy the Inference container (if not already deployed) +3. Publish a new model to the model repository +- Go to the "serving examples" project in the ClearML web UI, click on the Models Tab, search for "train sklearn model" right click and select "Publish" +- Use the RestAPI [details](https://clear.ml/docs/latest/docs/references/api/models#post-modelspublish_many) +- Use Python interface: +```python +from clearml import Model +Model(model_id="unique_model_id_here").publish() +``` +4. 
The new model is available on a new endpoint version (1), test with: +`curl -X POST "http://127.0.0.1:8080/serve/test_model_sklearn_auto/1" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` + +### Canary endpoint setup + +Canary endpoint deployment adds a new endpoint where the actual request is sent to a preconfigured set of endpoints with pre-provided distribution. For example, let's create a new endpoint "test_model_sklearn_canary", we can provide a list of endpoints and probabilities (weights). + ```bash -clearml-serving launch --queue default +clearml-serving --id model canary --endpoint "test_model_sklearn_canary" --weights 0.1 0.9 --input-endpoints test_model_sklearn/2 test_model_sklearn/1 +``` +This means that any request coming to `/test_model_sklearn_canary/` will be routed with probability of 90% to +`/test_model_sklearn/1/` and with probability of 10% to `/test_model_sklearn/2/` + +**Note:** +> As with any other Serving Service configuration, we can configure the Canary endpoint while the Inference containers are already running and deployed, they will get updated in their next update cycle (default: once every 5 minutes) + +We can also prepare a "fixed" canary endpoint, always splitting the load between the last two deployed models: +```bash +clearml-serving --id model canary --endpoint "test_model_sklearn_canary" --weights 0.1 0.9 --input-endpoints-prefix test_model_sklearn/ ``` -4. Optional: If you do not have a machine connected to your ClearML cluster, either read more on our Kubernetes integration, or spin a bare-metal worker and connect it with your ClearML Server. - `clearml-serving` is leveraging the orchestration capabilities of `ClearML` to launch the serving engine on the cluster. 
- Read more on the [ClearML Agent](https://github.com/allegroai/clearml-agent) orchestration module [here](https://clear.ml/docs/latest/docs/clearml_agent) - If you have not yet setup a ClearML worker connected to your `clearml` account, you can do this now using: - ```bash - pip install clearml-agent - clearml-agent daemon --docker --queue default --detached - ```
This means that if we have two model inference endpoints: `/test_model_sklearn/1/`, `/test_model_sklearn/2/` +the 10% probability (weight 0.1) will match the last (order by version number) endpoint, i.e. `/test_model_sklearn/2/` and the 90% will match `/test_model_sklearn/1/` +When we add a new model endpoint version, e.g. `/test_model_sklearn/3/`, the canary distribution will automatically match the 90% probability to `/test_model_sklearn/2/` and the 10% to the new endpoint `/test_model_sklearn/3/` + +Example: +1. Add two endpoints: + - `clearml-serving --id model add --engine sklearn --endpoint "test_model_sklearn" --preprocess "examples/sklearn/preprocess.py" --name "train sklearn model" --version 1 --project "serving examples"` + - `clearml-serving --id model add --engine sklearn --endpoint "test_model_sklearn" --preprocess "examples/sklearn/preprocess.py" --name "train sklearn model" --version 2 --project "serving examples"` +2. Add Canary endpoint: + - `clearml-serving --id model canary --endpoint "test_model_sklearn_canary" --weights 0.1 0.9 --input-endpoints test_model_sklearn/2 test_model_sklearn/1` +3. Test Canary endpoint: + - `curl -X POST "http://127.0.0.1:8080/serve/test_model_sklearn_canary" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` -**We are done!** -To test the new served model, you can `curl` to the new endpoint: -```bash -curl :8000/v2/models/keras_mnist/versions/1 -``` +### Model inference Examples -**Notice**: If we re-run our keras training example and publish a new model in the repository, the engine will automatically update to the new model. 
+- Scikit-Learn [example](examples/sklearn/readme.md) - random data +- XGBoost [example](examples/xgboost/readme.md) - iris dataset +- LightGBM [example](examples/lightgbm/readme.md) - iris dataset +- PyTorch [example](examples/pytorch/readme.md) - mnist dataset +- TensorFlow/Keras [example](examples/keras/readme.md) - mnist dataset -Further reading on advanced topics [here](coming-soon) +### Status + + - [x] FastAPI integration for inference service + - [x] multi-process Gunicorn for inference service + - [x] Dynamic preprocess python code loading (no need for container/process restart) + - [x] Model files download/caching (http/s3/gs/azure) + - [x] Scikit-learn. XGBoost, LightGBM integration + - [x] Custom inference, including dynamic code loading + - [x] Manual model upload/registration to model repository (http/s3/gs/azure) + - [x] Canary load balancing + - [x] Auto model endpoint deployment based on model repository state + - [x] Machine/Node health metrics + - [x] Dynamic online configuration + - [x] CLI configuration tool + - [x] Nvidia Triton integration + - [x] GZip request compression + - [ ] TorchServe engine integration + - [ ] Prebuilt Docker containers (dockerhub) + - [x] Scikit-Learn example + - [x] XGBoost example + - [x] LightGBM example + - [x] PyTorch example + - [x] TensorFlow/Keras example + - [ ] Model ensemble example + - [ ] Model pipeline example + - [ ] Statistics Service + - [ ] Kafka install instructions + - [ ] Prometheus install instructions + - [ ] Grafana install instructions + - [ ] Kubernetes Helm Chart + +## Contributing + +**PRs are always welcomed** :heart: See more details in the ClearML [Guidelines for Contributing](https://github.com/allegroai/clearml/blob/master/docs/contributing.md). 
diff --git a/clearml_serving/__main__.py b/clearml_serving/__main__.py index 7dc608c..af41569 100644 --- a/clearml_serving/__main__.py +++ b/clearml_serving/__main__.py @@ -1,179 +1,441 @@ import json -import os -from argparse import ArgumentParser, FileType +import os.path +from argparse import ArgumentParser +from pathlib import Path -from .serving_service import ServingService +from clearml_serving.serving.model_request_processor import ModelRequestProcessor, CanaryEP +from clearml_serving.serving.preprocess_service import ModelMonitoring, ModelEndpoint + +verbosity = False -def restore_state(args): - session_state_file = os.path.expanduser('~/.clearml_serving.json') - # noinspection PyBroadException - try: - with open(session_state_file, 'rt') as f: - state = json.load(f) - except Exception: - state = {} - # store command line passed ID - args.cmd_id = getattr(args, 'id', None) - # restore ID from state - args.id = getattr(args, 'id', None) or state.get('id') - return args - - -def store_state(args, clear=False): - session_state_file = os.path.expanduser('~/.clearml_serving.json') - if clear: - state = {} - else: - state = {str(k): str(v) if v is not None else None - for k, v in args.__dict__.items() if not str(k).startswith('_') and k not in ('command', )} - # noinspection PyBroadException - try: - with open(session_state_file, 'wt') as f: - json.dump(state, f, sort_keys=True) - except Exception: - pass - - -def cmd_triton(args): - if not args.id and not args.name: - raise ValueError("Serving service must have a name, use --name ") - - if args.cmd_id or (args.id and not args.name): - a_serving = ServingService(task_id=args.cmd_id or args.id) - else: - a_serving = ServingService(task_project=args.project, task_name=args.name, engine_type='triton') - args.id = a_serving.get_id() - - if args.endpoint: - print("Nvidia Triton Engine ID: {} - Adding serving endpoint: \n".format(args.id) + - ("model-project: '{}', model-name: '{}', model-tags: '{}', config-file: 
'{}'".format( - args.model_project or '', - args.model_name or '', - args.model_tags or '', - args.config or '') if not args.model_id else - "model-id: '{}', config-file: '{}'".format(args.model_id or '', args.config or ''))) - - if not args.endpoint and (args.model_project or args.model_tags or args.model_id or args.model_name): - raise ValueError("Serving endpoint must be provided, add --endpoint ") - - if args.endpoint: - a_serving.add_model_serving( - serving_url=args.endpoint, - model_project=args.model_project, - model_name=args.model_name, - model_tags=args.model_tags, - model_ids=[args.model_id] if args.model_id else None, - config_file=args.config, - max_versions=args.versions, - ) - - a_serving.serialize(force=True) - store_state(args) - - -def cmd_launch(args): - print('Launching Serving Engine: service: {}, queue: {}'.format(args.id, args.queue)) - +def func_model_upload(args): + if not args.path and not args.url: + raise ValueError("Either --path or --url must be specified") + if args.path and args.url: + raise ValueError("Either --path or --url but not both") + if args.path and not os.path.exists(args.path): + raise ValueError("--path='{}' could not be found".format(args.path)) if not args.id: - raise ValueError("Serving service must specify serving service ID, use --id ") - - a_serving = ServingService(task_id=args.id) - - if a_serving.get_engine_type() not in ('triton',): - raise ValueError("Error, serving engine type \'{}\' is not supported".format(a_serving.get_engine_type())) - - # launch services queue - a_serving.launch(queue_name=args.service_queue) - # launch engine - a_serving.launch_engine( - queue_name=args.queue, - container=args.engine_container or None, - container_args=args.engine_container_args or None, - ) + raise ValueError("Serving Service ID must be provided, use --id ") + from clearml import Task, OutputModel + from clearml.backend_interface.util import get_or_create_project + # todo: make it look nice + t = 
Task.get_task(task_id=args.id) + print("Creating new Model name='{}' project='{}' tags={}".format(args.name, args.project, args.tags or "")) + model = OutputModel(task=t, name=args.name, tags=args.tags or None, framework=args.framework) + destination = args.destination or t.get_output_destination() or t.get_logger().get_default_upload_destination() + model.set_upload_destination(uri=destination) + if args.path: + print("Uploading model file \'{}\' to {}".format(args.path, destination)) + else: + print("Registering model file \'{}\'".format(args.url)) + model.update_weights(weights_filename=args.path, register_uri=args.url, auto_delete_file=False) + if args.project: + # noinspection PyProtectedMember + model._base_model.update( + project_id=get_or_create_project(session=t.session, project_name=args.project) + ) + print("Model created and registered, new Model ID={}".format(model.id)) + if args.publish: + model.publish() + print("Published Model ID={}".format(model.id)) -def cli(verbosity): +def func_model_ls(args): + request_processor = ModelRequestProcessor(task_id=args.id) + print("List model serving and endpoints, control task id={}".format(request_processor.get_id())) + request_processor.deserialize(skip_sync=True) + print("Endpoints:\n{}".format(json.dumps(request_processor.get_endpoints(), indent=2))) + print("Model Monitoring:\n{}".format(json.dumps(request_processor.get_model_monitoring(), indent=2))) + print("Canary:\n{}".format(json.dumps(request_processor.get_canary_endpoints(), indent=2))) + + +def func_create_service(args): + request_processor = ModelRequestProcessor( + force_create=True, name=args.name, project=args.project, tags=args.tags or None) + print("New Serving Service created: id={}".format(request_processor.get_id())) + + +def func_config_service(args): + request_processor = ModelRequestProcessor(task_id=args.id) + print("Configure serving service id={}".format(request_processor.get_id())) + request_processor.deserialize(skip_sync=True) + if 
args.base_serving_url: + print("Configuring serving service [id={}] base_serving_url={}".format( + request_processor.get_id(), args.base_serving_url)) + request_processor.configure(external_serving_base_url=args.base_serving_url) + if args.triton_grpc_server: + print("Configuring serving service [id={}] triton_grpc_server={}".format( + request_processor.get_id(), args.triton_grpc_server)) + request_processor.configure(external_triton_grpc_server=args.triton_grpc_server) + + +def func_list_services(_): + running_services = ModelRequestProcessor.list_control_plane_tasks() + print("Currently running Serving Services:\n") + if not running_services: + print("No running services found") + else: + for s in running_services: + print(s) + + +def func_model_remove(args): + request_processor = ModelRequestProcessor(task_id=args.id) + print("Serving service Task {}, Removing Model endpoint={}".format(request_processor.get_id(), args.endpoint)) + request_processor.deserialize(skip_sync=True) + if request_processor.remove_endpoint(endpoint_url=args.endpoint): + print("Removing static endpoint: {}".format(args.endpoint)) + elif request_processor.remove_model_monitoring(model_base_url=args.endpoint): + print("Removing model monitoring endpoint: {}".format(args.endpoint)) + elif request_processor.remove_canary_endpoint(endpoint_url=args.endpoint): + print("Removing model canary endpoint: {}".format(args.endpoint)) + else: + print("Error: Could not find base endpoint URL: {}".format(args.endpoint)) + return + print("Updating serving service") + request_processor.serialize() + + +def func_canary_add(args): + request_processor = ModelRequestProcessor(task_id=args.id) + print("Serving service Task {}, Adding canary endpoint \'/{}/\'".format( + request_processor.get_id(), args.endpoint)) + request_processor.deserialize(skip_sync=True) + if not request_processor.add_canary_endpoint( + canary=CanaryEP( + endpoint=args.endpoint, + weights=args.weights, + 
load_endpoints=args.input_endpoints, + load_endpoint_prefix=args.input_endpoint_prefix, + ) + ): + print("Error: Could not add canary endpoint URL: {}".format(args.endpoint)) + return + + print("Updating serving service") + request_processor.serialize() + + +def func_model_auto_update_add(args): + request_processor = ModelRequestProcessor(task_id=args.id) + print("Serving service Task {}, Adding Model monitoring endpoint: \'/{}/\'".format( + request_processor.get_id(), args.endpoint)) + + if args.aux_config: + if len(args.aux_config) == 1 and Path(args.aux_config[0]).exists(): + aux_config = Path(args.aux_config[0]).read_text() + else: + from clearml.utilities.pyhocon import ConfigFactory + aux_config = ConfigFactory.parse_string('\n'.join(args.aux_config)).as_plain_ordered_dict() + else: + aux_config = None + + request_processor.deserialize(skip_sync=True) + if not request_processor.add_model_monitoring( + ModelMonitoring( + base_serving_url=args.endpoint, + engine_type=args.engine, + monitor_project=args.project, + monitor_name=args.name, + monitor_tags=args.tags or None, + only_published=args.published, + max_versions=args.max_versions, + input_size=args.input_size, + input_type=args.input_type, + input_name=args.input_name, + output_size=args.output_size, + output_type=args.output_type, + output_name=args.output_name, + auxiliary_cfg=aux_config, + ), + preprocess_code=args.preprocess + ): + print("Error: Could not find base endpoint URL: {}".format(args.endpoint)) + print("Updating serving service") + request_processor.serialize() + + +def func_model_endpoint_add(args): + request_processor = ModelRequestProcessor(task_id=args.id) + print("Serving service Task {}, Adding Model endpoint \'/{}/\'".format( + request_processor.get_id(), args.endpoint)) + request_processor.deserialize(skip_sync=True) + + if args.aux_config: + if len(args.aux_config) == 1 and Path(args.aux_config[0]).exists(): + aux_config = Path(args.aux_config[0]).read_text() + else: + from 
clearml.utilities.pyhocon import ConfigFactory + aux_config = ConfigFactory.parse_string('\n'.join(args.aux_config)).as_plain_ordered_dict() + else: + aux_config = None + + if not request_processor.add_endpoint( + ModelEndpoint( + engine_type=args.engine, + serving_url=args.endpoint, + version=args.version, + model_id=args.model_id, + input_size=args.input_size, + input_type=args.input_type, + input_name=args.input_name, + output_size=args.output_size, + output_type=args.output_type, + output_name=args.output_name, + auxiliary_cfg=aux_config, + ), + preprocess_code=args.preprocess, + model_name=args.name, + model_project=args.project, + model_tags=args.tags or None, + model_published=args.published, + ): + print("Error: Could not find base endpoint URL: {}".format(args.endpoint)) + print("Updating serving service") + request_processor.serialize() + + +def cli(): title = 'clearml-serving - CLI for launching ClearML serving engine' print(title) parser = ArgumentParser(prog='clearml-serving', description=title) parser.add_argument('--debug', action='store_true', help='Print debug messages') + parser.add_argument( + '--id', type=str, + help='Control plane Task ID to configure ' + '(if not provided automatically detect the running control plane Task)') subparsers = parser.add_subparsers(help='Serving engine commands', dest='command') - # create the launch command - parser_launch = subparsers.add_parser('launch', help='Launch a previously configured serving service') - parser_launch.add_argument( - '--id', default=None, type=str, - help='Specify a previously configured service ID, if not provided use the last created service') - parser_launch.add_argument( - '--queue', default=None, type=str, required=True, - help='Specify the clearml queue to be used for the serving engine server') - parser_launch.add_argument( - '--engine-container', default=None, type=str, required=False, - help='Specify the serving engine container to use.') - parser_launch.add_argument( - 
'--engine-container-args', default=None, type=str, required=False, - help='Specify the serving engine container execution arguments (single string). ' - 'Notice: this will override any default container arguments') - parser_launch.add_argument( - '--service-queue', default='services', type=str, - help='Specify the service queue to be used for the serving service, default: services queue') - parser_launch.set_defaults(func=cmd_launch) + parser_list = subparsers.add_parser('list', help='List running Serving Service') + parser_list.set_defaults(func=func_list_services) - # create the parser for the "triton" command - parser_trt = subparsers.add_parser('triton', help='Nvidia Triton Serving Engine') - parser_trt.add_argument( - '--id', default=None, type=str, - help='Add configuration to running serving session, pass serving Task ID, ' - 'if passed ignore --name / --project') - parser_trt.add_argument( - '--name', default=None, type=str, - help='Give serving service a name, should be a unique name') - parser_trt.add_argument( - '--project', default='DevOps', type=str, - help='Serving service project name, default: DevOps') - parser_trt.add_argument( - '--endpoint', required=False, type=str, - help='Serving endpoint, one per model, unique ') - parser_trt.add_argument( - '--versions', type=int, - help='Serving endpoint, support multiple versions, ' - 'max versions to deploy (version number always increase). Default (no versioning).') - parser_trt.add_argument( - '--config', required=False, type=FileType('r'), - help='Model `config.pbtxt` file, one per model, order matching with models') - parser_trt.add_argument( + parser_create = subparsers.add_parser('create', help='Create a new Serving Service') + parser_create.add_argument( + '--name', type=str, + help='[Optional] name the new serving service. 
Default: Serving-Service') + parser_create.add_argument( + '--tags', type=str, nargs='+', + help='[Optional] Specify tags for the new serving service') + parser_create.add_argument( + '--project', type=str, + help='[Optional] Specify project for the serving service. Default: DevOps') + parser_create.set_defaults(func=func_create_service) + + parser_config = subparsers.add_parser('config', help='Configure a new Serving Service') + parser_config.add_argument( + '--base-serving-url', type=str, + help='External base serving service url. example: http://127.0.0.1:8080/serve') + parser_config.add_argument( + '--triton-grpc-server', type=str, + help='External ClearML-Triton serving container gRPC address. example: 127.0.0.1:9001') + parser_config.set_defaults(func=func_config_service) + + parser_model = subparsers.add_parser('model', help='Configure Model endpoints for an already running Service') + parser_model.set_defaults(func=parser_model.print_help) + + model_cmd = parser_model.add_subparsers(help='model command help') + + parser_model_ls = model_cmd.add_parser('list', help='List current models') + parser_model_ls.set_defaults(func=func_model_ls) + + parser_model_rm = model_cmd.add_parser('remove', help='Remove model by it`s endpoint name') + parser_model_rm.add_argument( + '--endpoint', type=str, help='model endpoint name') + parser_model_rm.set_defaults(func=func_model_remove) + + parser_model_upload = model_cmd.add_parser('upload', help='Upload and register model files/folder') + parser_model_upload.add_argument( + '--name', type=str, required=True, + help='Specifying the model name to be registered in') + parser_model_upload.add_argument( + '--tags', type=str, nargs='+', + help='Optional: Add tags to the newly created model') + parser_model_upload.add_argument( + '--project', type=str, required=True, + help='Specifying the project for the model tp be registered in') + parser_model_upload.add_argument( + '--framework', type=str, choices=("scikit-learn", "xgboost", 
"lightgbm", "tensorflow", "pytorch"), + help='[Optional] Specify the model framework: "scikit-learn", "xgboost", "lightgbm", "tensorflow", "pytorch"') + parser_model_upload.add_argument( + '--publish', action='store_true', + help='[Optional] Publish the newly created model ' + '(change model state to "published" i.e. locked and ready to deploy') + parser_model_upload.add_argument( + '--path', type=str, + help='Specifying a model file/folder to be uploaded and registered/') + parser_model_upload.add_argument( + '--url', type=str, + help='Optional, Specifying an already uploaded model url ' + '(e.g. s3://bucket/model.bin, gs://bucket/model.bin, azure://bucket/model.bin, ' + 'https://domain/model.bin)') + parser_model_upload.add_argument( + '--destination', type=str, + help='Optional, Specifying the target destination for the model to be uploaded' + '(e.g. s3://bucket/folder/, gs://bucket/folder/, azure://bucket/folder/)') + parser_model_upload.set_defaults(func=func_model_upload) + + parser_model_lb = model_cmd.add_parser('canary', help='Add model Canary/A/B endpoint') + parser_model_lb.add_argument( + '--endpoint', type=str, help='model canary serving endpoint name (e.g. my_model/latest)') + parser_model_lb.add_argument( + '--weights', type=float, nargs='+', help='model canary weights (order matching model ep), (e.g. 0.2 0.8)') + parser_model_lb.add_argument( + '--input-endpoints', type=str, nargs='+', + help='Model endpoint prefixes, can also include version (e.g. my_model, my_model/v1)') + parser_model_lb.add_argument( + '--input-endpoint-prefix', type=str, + help='Model endpoint prefix, lexicographic order or by version (e.g. 
my_model/1, my_model/v1) ' + 'where the first weight matches the last version.') + parser_model_lb.set_defaults(func=func_canary_add) + + parser_model_monitor = model_cmd.add_parser('auto-update', help='Add/Modify model auto update service') + parser_model_monitor.add_argument( + '--endpoint', type=str, + help='Base Model endpoint (must be unique)') + parser_model_monitor.add_argument( + '--engine', type=str, required=True, + help='Model endpoint serving engine (triton, sklearn, xgboost, lightgbm)') + parser_model_monitor.add_argument( + '--max-versions', type=int, default=1, + help='max versions to store (and create endpoints) for the model. highest number is the latest version') + parser_model_monitor.add_argument( + '--name', type=str, + help='Specify Model Name to be selected and auto updated ' + '(notice regexp selection use \"$name^\" for exact match)') + parser_model_monitor.add_argument( + '--tags', type=str, nargs='+', + help='Specify Tags to be selected and auto updated') + parser_model_monitor.add_argument( + '--project', type=str, + help='Specify Model Project to be selected and auto updated') + parser_model_monitor.add_argument( + '--published', action='store_true', + help='Only select published Model for the auto updated') + parser_model_monitor.add_argument( + '--preprocess', type=str, + help='Specify Pre/Post processing code to be used with the model (point to local file / folder) ' + '- this should hold for all the models' + ) + parser_model_monitor.add_argument( + '--input-size', type=int, nargs='+', + help='Optional: Specify the model matrix input size [Rows x Columns X Channels etc ...]' + ) + parser_model_monitor.add_argument( + '--input-type', type=str, + help='Optional: Specify the model matrix input type, examples: uint8, float32, int16, float16 etc.' 
+ ) + parser_model_monitor.add_argument( + '--input-name', type=str, + help='Optional: Specify the model layer pushing input into, examples: layer_0' + ) + parser_model_monitor.add_argument( + '--output-size', type=int, nargs='+', + help='Optional: Specify the model matrix output size [Rows x Columns X Channels etc ...]' + ) + parser_model_monitor.add_argument( + '--output-type', type=str, + help='Optional: Specify the model matrix output type, examples: uint8, float32, int16, float16 etc.' + ) + parser_model_monitor.add_argument( + '--output-name', type=str, + help='Optional: Specify the model layer pulling results from, examples: layer_99' + ) + parser_model_monitor.add_argument( + '--aux-config', type=str, nargs='+', + help='Specify additional engine specific auxiliary configuration in the form of key=value. ' + 'Example: platform=onnxruntime_onnx response_cache.enable=true max_batch_size=8 ' + 'Notice: you can also pass full configuration file (e.g. Triton "config.pbtxt")' + ) + parser_model_monitor.set_defaults(func=func_model_auto_update_add) + + parser_model_add = model_cmd.add_parser('add', help='Add/Update model') + parser_model_add.add_argument( + '--engine', type=str, required=True, + help='Model endpoint serving engine (triton, sklearn, xgboost, lightgbm)') + parser_model_add.add_argument( + '--endpoint', type=str, required=True, + help='Model endpoint (must be unique)') + parser_model_add.add_argument( + '--version', type=str, default=None, + help='Model endpoint version (default: None)') + parser_model_add.add_argument( '--model-id', type=str, - help='(Optional) Model ID to deploy, if passed model-project/model-name/model-tags are ignored') - parser_trt.add_argument( - '--model-project', type=str, help='Automatic model deployment and upgrade, select model project (exact match)') - parser_trt.add_argument( - '--model-name', type=str, help='Automatic model deployment and upgrade, select model name (exact match)') - parser_trt.add_argument( -
'--model-tags', nargs='*', type=str, - help='Automatic model deployment and upgrade, select model name tags to include, ' - 'model has to have all tags to be deployed/upgraded') - parser_trt.set_defaults(func=cmd_triton) + help='Specify a Model ID to be served') + parser_model_add.add_argument( + '--preprocess', type=str, + help='Specify Pre/Post processing code to be used with the model (point to local file / folder)' + ) + parser_model_add.add_argument( + '--input-size', type=int, nargs='+', + help='Optional: Specify the model matrix input size [Rows x Columns X Channels etc ...]' + ) + parser_model_add.add_argument( + '--input-type', type=str, + help='Optional: Specify the model matrix input type, examples: uint8, float32, int16, float16 etc.' + ) + parser_model_add.add_argument( + '--input-name', type=str, + help='Optional: Specify the model layer pushing input into, examples: layer_0' + ) + parser_model_add.add_argument( + '--output-size', type=int, nargs='+', + help='Optional: Specify the model matrix output size [Rows x Columns X Channels etc ...]' + ) + parser_model_add.add_argument( + '--output-type', type=str, + help='Specify the model matrix output type, examples: uint8, float32, int16, float16 etc.' + ) + parser_model_add.add_argument( + '--output-name', type=str, + help='Optional: Specify the model layer pulling results from, examples: layer_99' + ) + parser_model_add.add_argument( + '--aux-config', type=str, nargs='+', + help='Specify additional engine specific auxiliary configuration in the form of key=value. ' + 'Example: platform=onnxruntime_onnx response_cache.enable=true max_batch_size=8 ' + 'Notice: you can also pass full configuration file (e.g. 
Triton "config.pbtxt")' + ) + parser_model_add.add_argument( + '--name', type=str, + help='[Optional] Instead of specifying model-id select based on Model Name') + parser_model_add.add_argument( + '--tags', type=str, nargs='+', + help='[Optional] Instead of specifying model-id select based on Model Tags') + parser_model_add.add_argument( + '--project', type=str, + help='[Optional] Instead of specifying model-id select based on Model project') + parser_model_add.add_argument( + '--published', action='store_true', + help='[Optional] Instead of specifying model-id select based on Model published') + parser_model_add.set_defaults(func=func_model_endpoint_add) args = parser.parse_args() - verbosity['debug'] = args.debug - args = restore_state(args) + global verbosity + verbosity = args.debug if args.command: - args.func(args) + if args.command not in ("create", "list") and not args.id: + print("Notice! serving service ID not provided, selecting the first active service") + + try: + args.func(args) + except AttributeError: + args.func() else: parser.print_help() def main(): - verbosity = dict(debug=False) + global verbosity try: - cli(verbosity) + cli() except KeyboardInterrupt: print('\nUser aborted') except Exception as ex: print('\nError: {}'.format(ex)) - if verbosity.get('debug'): + if verbosity: raise ex exit(1) diff --git a/clearml_serving/engines/__init__.py b/clearml_serving/engines/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/clearml_serving/engines/triton/Dockerfile b/clearml_serving/engines/triton/Dockerfile new file mode 100644 index 0000000..162940c --- /dev/null +++ b/clearml_serving/engines/triton/Dockerfile @@ -0,0 +1,22 @@ + +FROM nvcr.io/nvidia/tritonserver:22.02-py3 + + +ENV LC_ALL=C.UTF-8 + +# install base package +RUN pip3 install clearml-serving + +# get latest execution code from the git repository +# RUN cd $HOME && git clone https://github.com/allegroai/clearml-serving.git +COPY clearml_serving 
/root/clearml/clearml_serving + +RUN pip3 install -r /root/clearml/clearml_serving/engines/triton/requirements.txt + +# default serving port +EXPOSE 8001 + +# environement variable to load Task from CLEARML_SERVING_TASK_ID, CLEARML_SERVING_PORT + +WORKDIR /root/clearml/ +ENTRYPOINT ["clearml_serving/engines/triton/entrypoint.sh"] diff --git a/clearml_serving/engines/triton/__init__.py b/clearml_serving/engines/triton/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/clearml_serving/engines/triton/entrypoint.sh b/clearml_serving/engines/triton/entrypoint.sh new file mode 100755 index 0000000..a896525 --- /dev/null +++ b/clearml_serving/engines/triton/entrypoint.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# print configuration +echo CLEARML_SERVING_TASK_ID="$CLEARML_SERVING_TASK_ID" +echo CLEARML_TRITON_POLL_FREQ="$CLEARML_TRITON_POLL_FREQ" +echo CLEARML_TRITON_METRIC_FREQ="$CLEARML_TRITON_METRIC_FREQ" +echo CLEARML_TRITON_HELPER_ARGS="$CLEARML_TRITON_HELPER_ARGS" +echo EXTRA_PYTHON_PACKAGES="$EXTRA_PYTHON_PACKAGES" + +# we should also have clearml-server configurations + +if [ ! 
-z "$EXTRA_PYTHON_PACKAGES" ] +then + python3 -m pip install $EXTRA_PYTHON_PACKAGES +fi + +# start service +PYTHONPATH=$(pwd) python3 clearml_serving/engines/triton/triton_helper.py $CLEARML_TRITON_HELPER_ARGS $@ diff --git a/clearml_serving/engines/triton/requirements.txt b/clearml_serving/engines/triton/requirements.txt new file mode 100644 index 0000000..aec1e2f --- /dev/null +++ b/clearml_serving/engines/triton/requirements.txt @@ -0,0 +1,6 @@ +clearml >= 1.1.6 +clearml-serving +tritonclient +grpcio +Pillow +pathlib2 \ No newline at end of file diff --git a/clearml_serving/engines/triton/triton_helper.py b/clearml_serving/engines/triton/triton_helper.py new file mode 100644 index 0000000..5371a19 --- /dev/null +++ b/clearml_serving/engines/triton/triton_helper.py @@ -0,0 +1,508 @@ +import os +import re +import shutil +import subprocess +import numpy as np +from argparse import ArgumentParser +from time import time +from typing import Optional + +from pathlib2 import Path + +from clearml import Task, Logger, InputModel +from clearml.backend_api.utils import get_http_session_with_retry +from clearml_serving.serving.model_request_processor import ModelRequestProcessor, ModelEndpoint +from clearml.utilities.pyhocon import ConfigFactory, ConfigTree, HOCONConverter + + +class TritonHelper(object): + _metric_line_parsing = r"(\w+){(gpu_uuid=\"[\w\W]*\",)?model=\"(\w+)\",\s*version=\"(\d+)\"}\s*([0-9.]*)" + _default_metrics_port = 8002 + + def __init__( + self, + args, # Any + task, # type: Task + serving_id, # type: str + metric_host=None, # type: Optional[str] + metric_port=None, # type: int + ): + # type: (...) 
-> None + self._http_session = get_http_session_with_retry() + self.args = dict(**args.__dict__) if args else {} + self.task = task + self._serving_service_task_id = serving_id + self._serving_service_task = None  # type: Optional[ModelRequestProcessor] + self._current_endpoints = {} + self.metric_host = metric_host or '0.0.0.0' + self.metric_port = metric_port or self._default_metrics_port + self._parse_metric = re.compile(self._metric_line_parsing) + self._timestamp = time() + self._last_update_step = None + print('Starting Triton Helper service\n{}\n'.format(self.args)) + + def report_metrics(self, remote_logger): + # type: (Optional[Logger]) -> bool + # iterations are seconds from start + iteration = int(time() - self._timestamp) + + report_msg = "reporting metrics: relative time {} sec".format(iteration) + self.task.get_logger().report_text(report_msg) + if remote_logger: + remote_logger.report_text(report_msg, print_console=False) + + # noinspection PyBroadException + try: + # this is inside the container + request = self._http_session.get('http://{}:{}/metrics'.format(self.metric_host, self.metric_port))  # noqa + if not request.ok: + return False + content = request.content.decode().split('\n') + except Exception: + return False + + for line in content: + line = line.strip() + if not line or line.startswith('#'): + continue + # noinspection PyBroadException + try: + metric, gpu_uuid, variant, version, value = self._parse_metric.match(line).groups() + value = float(value) + except Exception: + continue + self.task.get_logger().report_scalar( + title=metric, + series='{}.v{}'.format(variant, version), + iteration=iteration, + value=value + ) + # on the remote logger we add our own Task ID (unique ID), + # to support multiple servers reporting to the same service controller + if remote_logger: + remote_logger.report_scalar( + title=metric, + series='{}.v{}.{}'.format(variant, version, self.task.id), + iteration=iteration, + value=value + ) + + def
model_service_update_step(self, model_repository_folder=None, verbose=True): + # type: (Optional[str], bool) -> bool + + if not self._serving_service_task: + return False + + active_endpoints = self._serving_service_task.get_synced_endpoints() + + self._last_update_step = time() + + # nothing to do + if self._current_endpoints == active_endpoints: + return False + + if not model_repository_folder: + model_repository_folder = '/models/' + + if verbose: + print('Updating local model folder: {}'.format(model_repository_folder)) + + for url, endpoint in active_endpoints.items(): + + # skip if there is no change + if url in self._current_endpoints and self._current_endpoints.get(url) == endpoint: + continue + + # skip if this is not a triton engine endpoint: + if endpoint.engine_type != "triton": + continue + + url = url.replace("/", "_") + + folder = Path(model_repository_folder) / url + folder.mkdir(parents=True, exist_ok=True) + + config_pbtxt = folder / 'config.pbtxt' + # download model versions + version = 1 + model_id = endpoint.model_id + + model_folder = folder / str(version) + + model_folder.mkdir(parents=True, exist_ok=True) + model = None + # noinspection PyBroadException + try: + model = InputModel(model_id) + local_path = model.get_local_copy() + except Exception: + local_path = None + if not local_path: + print("Error retrieving model ID {} [{}]".format(model_id, model.url if model else '')) + continue + + local_path = Path(local_path) + + # prepare config.pbtxt + self.create_config_pbtxt( + endpoint, target_pbtxt_file=config_pbtxt.as_posix(), platform=model.framework + ) + + if verbose: + print('Update model v{} in {}'.format(version, model_folder)) + + # if this is a folder copy everything and delete the temp folder + if local_path.is_dir() and model and ( + str(model.framework).lower().startswith("tensorflow") or + str(model.framework).lower().startswith("keras") + ): + # we assume we have a `tensorflow.savedmodel` folder + model_folder /= 'model.savedmodel' + 
model_folder.mkdir(parents=True, exist_ok=True) + # rename to old + old_folder = None + if model_folder.exists(): + old_folder = model_folder.parent / '.old.{}'.format(model_folder.name) + model_folder.replace(old_folder) + if verbose: + print('copy model into {}'.format(model_folder)) + shutil.copytree( + local_path.as_posix(), model_folder.as_posix(), symlinks=False, + ) + if old_folder: + shutil.rmtree(path=old_folder.as_posix()) + # delete temp folder + shutil.rmtree(local_path.as_posix()) + else: + # single file should be moved + if model and str(model.framework).lower().startswith("pytorch"): + target_path = model_folder / "model.pt" + else: + target_path = model_folder / local_path.name + + old_file = None + if target_path.exists(): + old_file = target_path.parent / '.old.{}'.format(target_path.name) + target_path.replace(old_file) + shutil.move(local_path.as_posix(), target_path.as_posix()) + if old_file: + old_file.unlink() + + # todo: trigger triton model reloading (instead of relaying on current poll mechanism) + # based on the model endpoint changes + + # update current state + self._current_endpoints = active_endpoints + + return True + + def maintenance_daemon( + self, + local_model_repo='/models', # type: str + update_frequency_sec=60.0, # type: float + metric_frequency_sec=60.0 # type: float + ): + # type: (...) 
-> None + + Path(local_model_repo).mkdir(parents=True, exist_ok=True) + + self._serving_service_task = ModelRequestProcessor(task_id=self._serving_service_task_id) + self.model_service_update_step(model_repository_folder=local_model_repo, verbose=True) + + # noinspection PyProtectedMember + remote_logger = self._serving_service_task._task.get_logger() + + # todo: log triton server outputs when running locally + + # we assume we can run the triton server + cmd = [ + 'tritonserver', + '--model-control-mode=poll', + '--model-repository={}'.format(local_model_repo), + '--repository-poll-secs={}'.format(update_frequency_sec), + '--metrics-port={}'.format(self._default_metrics_port), + '--allow-metrics=true', + '--allow-gpu-metrics=true', + ] + for k, v in self.args.items(): + if not v or not str(k).startswith('t_'): + continue + cmd.append('--{}={}'.format(k, v)) + + print('Starting server: {}'.format(cmd)) + try: + proc = subprocess.Popen(cmd) + except FileNotFoundError: + raise ValueError( + "Triton Server Engine (tritonserver) could not be found!\n" + "Verify you running inside the `nvcr.io/nvidia/tritonserver` docker container") + base_freq = min(update_frequency_sec, metric_frequency_sec) + metric_tic = update_tic = time() + while True: + try: + error_code = proc.wait(timeout=base_freq) + if error_code == 0: + print("triton-server process ended with error code {}".format(error_code)) + return + raise ValueError("triton-server process ended with error code {}".format(error_code)) + except subprocess.TimeoutExpired: + pass + pass + + # update models + if time() - update_tic > update_frequency_sec: + print("Info: syncing models from main serving service") + if self.model_service_update_step(model_repository_folder=local_model_repo, verbose=True): + print("Info: Models updated from main serving service") + update_tic = time() + + # update stats + if time() - metric_tic > metric_frequency_sec: + metric_tic = time() + self.report_metrics(remote_logger) + + @classmethod + 
def create_config_pbtxt(cls, endpoint, target_pbtxt_file, platform=None): + # type: (ModelEndpoint, str, Optional[str]) -> bool + """ + Full spec available here: + https://github.com/triton-inference-server/server/blob/main/docs/model_configuration.md + """ + def _convert_lists(config): + if isinstance(config, list): + return [_convert_lists(i) for i in config] + + if not isinstance(config, ConfigTree): + return config + + for k in list(config.keys()): + v = config[k] + # try to convert to list + if isinstance(v, (ConfigTree, list)): + # noinspection PyBroadException + try: + a_list = config.get_list(k, []) + if a_list: + config[k] = _convert_lists(a_list) + continue + except Exception: + pass + + config[k] = _convert_lists(v) + + return config + + final_config_pbtxt = "" + config_dict = dict() + + if endpoint.auxiliary_cfg and isinstance(endpoint.auxiliary_cfg, str): + final_config_pbtxt = endpoint.auxiliary_cfg + "\n" + elif endpoint.auxiliary_cfg and isinstance(endpoint.auxiliary_cfg, dict): + config_dict = dict(**endpoint.auxiliary_cfg) + + config_dict = ConfigFactory.from_dict(config_dict) + + # The framework for the model. Possible values are: + # "tensorrt_plan", "tensorflow_graphdef", + # "tensorflow_savedmodel", "onnxruntime_onnx", + # "pytorch_libtorch". 
+ # Default for TF: "tensorflow_savedmodel" + + # replace ": [{" with ": [{" (currently not needed) + # pattern = re.compile(r"(?P\w+)(?P\s+)(?P(\[)|({))") + + if endpoint.input_size: + config_dict.put("input.0.dims", endpoint.input_size) + + if endpoint.output_size: + config_dict.put("output.0.dims", endpoint.output_size) + + input_type = None + if endpoint.input_type: + input_type = "TYPE_" + cls.np_to_triton_dtype(np.dtype(endpoint.input_type)) + config_dict.put("input.0.data_type", input_type) + + output_type = None + if endpoint.output_type: + output_type = "TYPE_" + cls.np_to_triton_dtype(np.dtype(endpoint.output_type)) + config_dict.put("output.0.data_type", output_type) + + if endpoint.input_name: + config_dict.put("input.0.name", endpoint.input_name) + + if endpoint.output_name: + config_dict.put("output.0.name", endpoint.output_name) + + if platform and not config_dict.get("platform", None) and not config_dict.get("backend", None): + platform = str(platform).lower() + if platform.startswith("tensorflow") or platform.startswith("keras"): + config_dict["platform"] = "tensorflow_savedmodel" + elif platform.startswith("pytorch") or platform.startswith("caffe"): + config_dict["backend"] = "pytorch" + elif platform.startswith("onnx"): + config_dict["platform"] = "onnxruntime_onnx" + + # convert to lists anything that we can: + if config_dict: + config_dict = _convert_lists(config_dict) + # Convert HOCON standard to predefined message format + config_pbtxt = "\n" + HOCONConverter.to_hocon(config_dict).\ + replace("=", ":").replace(" : ", ": ") + # conform types (remove string quotes) + if input_type: + config_pbtxt = config_pbtxt.replace(f"\"{input_type}\"", f"{input_type}") + if output_type: + config_pbtxt = config_pbtxt.replace(f"\"{output_type}\"", f"{output_type}") + # conform types (remove string quotes) + config_pbtxt = config_pbtxt.replace("\"KIND_CPU\"", "KIND_CPU").replace("\"KIND_GPU\"", "KIND_GPU") + else: + config_pbtxt = "" + + # merge the two + 
final_config_pbtxt += config_pbtxt + print("INFO: target config.pbtxt file for endpoint '{}':\n{}\n".format( + endpoint.serving_url, final_config_pbtxt)) + + with open(target_pbtxt_file, "w") as config_file: + config_file.write(final_config_pbtxt) + + return True + + @staticmethod + def np_to_triton_dtype(np_dtype): + # type (np.dtype) -> str + """ + copied from tritonclientutils import np_to_triton_dtype + """ + if np_dtype == bool: + return "BOOL" + elif np_dtype == np.int8: + return "INT8" + elif np_dtype == np.int16: + return "INT16" + elif np_dtype == np.int32: + return "INT32" + elif np_dtype == np.int64: + return "INT64" + elif np_dtype == np.uint8: + return "UINT8" + elif np_dtype == np.uint16: + return "UINT16" + elif np_dtype == np.uint32: + return "UINT32" + elif np_dtype == np.uint64: + return "UINT64" + elif np_dtype == np.float16: + return "FP16" + elif np_dtype == np.float32: + return "FP32" + elif np_dtype == np.float64: + return "FP64" + elif np_dtype == np.object_ or np_dtype.type == np.bytes_: + return "BYTES" + return None + + +def main(): + title = 'clearml-serving - Nvidia Triton Engine Controller' + print(title) + parser = ArgumentParser(prog='clearml-serving', description=title) + parser.add_argument( + '--serving-id', default=os.environ.get('CLEARML_SERVING_TASK_ID'), type=str, + help='Specify main serving service Task ID') + parser.add_argument( + '--project', default='serving', type=str, + help='Optional specify project for the serving engine Task') + parser.add_argument( + '--name', default='nvidia-triton', type=str, + help='Optional specify task name for the serving engine Task') + parser.add_argument( + '--update-frequency', default=os.environ.get('CLEARML_TRITON_POLL_FREQ') or 10., type=float, + help='Model update frequency in minutes') + parser.add_argument( + '--metric-frequency', default=os.environ.get('CLEARML_TRITON_METRIC_FREQ') or 1., type=float, + help='Metric reporting update frequency in minutes') + parser.add_argument( + 
def main():
    """
    Entry point for the Nvidia Triton engine controller.

    Parses CLI arguments (with CLEARML_TRITON_* environment-variable overrides),
    creates the ClearML inference Task and starts the TritonHelper maintenance
    daemon, which never returns.
    """
    title = 'clearml-serving - Nvidia Triton Engine Controller'
    print(title)
    parser = ArgumentParser(prog='clearml-serving', description=title)
    parser.add_argument(
        '--serving-id', default=os.environ.get('CLEARML_SERVING_TASK_ID'), type=str,
        help='Specify main serving service Task ID')
    parser.add_argument(
        '--project', default='serving', type=str,
        help='Optional specify project for the serving engine Task')
    parser.add_argument(
        '--name', default='nvidia-triton', type=str,
        help='Optional specify task name for the serving engine Task')
    parser.add_argument(
        '--update-frequency', default=os.environ.get('CLEARML_TRITON_POLL_FREQ') or 10., type=float,
        help='Model update frequency in minutes')
    parser.add_argument(
        '--metric-frequency', default=os.environ.get('CLEARML_TRITON_METRIC_FREQ') or 1., type=float,
        help='Metric reporting update frequency in minutes')
    parser.add_argument(
        '--inference-task-id', default=None, type=str,
        help='Optional: Specify the inference Task ID to report to. default: create a new one')
    parser.add_argument(
        '--t-http-port', type=str, help=' The port for the server to listen on for HTTP requests')
    parser.add_argument(
        '--t-http-thread-count', type=str, help=' Number of threads handling HTTP requests')
    parser.add_argument(
        '--t-allow-grpc', type=str, help=' Allow the server to listen for GRPC requests')
    parser.add_argument(
        '--t-grpc-port', type=str, help=' The port for the server to listen on for GRPC requests')
    parser.add_argument(
        '--t-grpc-infer-allocation-pool-size', type=str,
        help=' The maximum number of inference request/response objects that remain '
             'allocated for reuse. As long as the number of in-flight requests doesn\'t exceed '
             'this value there will be no allocation/deallocation of request/response objects')
    parser.add_argument(
        '--t-pinned-memory-pool-byte-size', type=str,
        help=' The total byte size that can be allocated as pinned system '
             'memory. If GPU support is enabled, the server will allocate pinned '
             'system memory to accelerate data transfer between host and devices '
             'until it exceeds the specified byte size. This option will not affect '
             'the allocation conducted by the backend frameworks. Default is 256 MB')
    parser.add_argument(
        '--t-cuda-memory-pool-byte-size', type=str,
        help='<:> The total byte size that can be allocated as CUDA memory for '
             'the GPU device. If GPU support is enabled, the server will allocate '
             'CUDA memory to minimize data transfer between host and devices '
             'until it exceeds the specified byte size. This option will not affect '
             'the allocation conducted by the backend frameworks. The argument '
             'should be 2 integers separated by colons in the format :. This option can be used multiple times, but only '
             'once per GPU device. Subsequent uses will overwrite previous uses for '
             'the same GPU device. Default is 64 MB')
    parser.add_argument(
        '--t-min-supported-compute-capability', type=str,
        help=' The minimum supported CUDA compute capability. GPUs that '
             'don\'t support this compute capability will not be used by the server')
    parser.add_argument(
        '--t-buffer-manager-thread-count', type=str,
        help=' The number of threads used to accelerate copies and other'
             'operations required to manage input and output tensor contents.'
             'Default is 0')

    args = parser.parse_args()

    # apply CLEARML_TRITON_* environment-variable overrides on top of the CLI args
    prefix = "CLEARML_TRITON_"
    for k, v in os.environ.items():
        if not k.startswith(prefix):
            continue
        args_var = k.replace(prefix, "", 1).replace("-", "_").lower()
        if args_var in args.__dict__:
            # cast the string env value to the current argument's type.
            # BUGFIX: the original used `type(t)(v)` where `t` is already a type,
            # so the attribute was set to the *class* of `v` instead of the cast value.
            t = type(getattr(args, args_var, None))
            setattr(args, args_var, v if t in (type(None), str) else t(v))

    task = Task.init(
        project_name=args.project, task_name=args.name, task_type=Task.TaskTypes.inference,
        continue_last_task=args.inference_task_id or None
    )
    print("configuration args: {}".format(args))
    helper = TritonHelper(args, task, serving_id=args.serving_id)

    # safe casting of the frequency arguments (minutes -> seconds)
    try:
        update_frequency_sec = float(args.update_frequency) * 60.0
    except (ValueError, TypeError):
        update_frequency_sec = 600
    try:
        metric_frequency_sec = float(args.metric_frequency) * 60.0
    except (ValueError, TypeError):
        metric_frequency_sec = 60

    # this function will never return
    helper.maintenance_daemon(
        local_model_repo='/models',
        update_frequency_sec=update_frequency_sec,
        metric_frequency_sec=metric_frequency_sec,
    )


if __name__ == '__main__':
    main()
from typing import Any, Optional

import numpy as np


# Notice: the preprocessing class MUST be named "Preprocess"
class Preprocess(object):
    # serving configuration dict injected by the serving service, e.g.
    #   {'base_serving_url': 'http://127.0.0.1:8080/serve/',
    #    'triton_grpc_server': '127.0.0.1:9001'}
    serving_config = None

    def __init__(self):
        """Initialize internal state; called once per instance, not per request."""
        pass

    def load(self, local_file_name: str) -> Optional[Any]:
        """
        Optional, provide loading method for the model
        useful if we need to load a model in a specific way for the prediction engine to work
        :param local_file_name: file name / path to read load the model from
        :return: Object that will be called with .predict() method for inference
        """
        pass

    def preprocess(self, body: dict) -> Any:
        """
        Transform the raw request body; the returned object is passed
        as-is to the inference engine.
        """
        return body

    def postprocess(self, data: Any) -> dict:
        """
        Transform the model inference output; the returned dict is sent
        back as-is as the request result.
        """
        return data

    def process(self, data: Any) -> Any:
        """
        Transform the actual data; the returned object is passed as-is
        to the postprocess function.
        """
        return data
FROM python:3.9-bullseye


ENV LC_ALL=C.UTF-8

# install base package (--no-cache-dir keeps the pip cache out of the image)
RUN pip3 install --no-cache-dir clearml-serving

# get latest execution code from the git repository
# RUN cd $HOME && git clone https://github.com/allegroai/clearml-serving.git
COPY clearml_serving /root/clearml/clearml_serving

RUN pip3 install --no-cache-dir -r /root/clearml/clearml_serving/serving/requirements.txt

# default serving port
EXPOSE 8080

# environment variables read at runtime: CLEARML_SERVING_TASK_ID, CLEARML_SERVING_PORT

WORKDIR /root/clearml/
ENTRYPOINT ["clearml_serving/serving/entrypoint.sh"]
import gzip
import os
from multiprocessing import Lock
from typing import Optional, Dict, Any, Callable

from fastapi import FastAPI, Request, Response, APIRouter, HTTPException
from fastapi.routing import APIRoute

from clearml import Task
from clearml_serving.version import __version__
from clearml_serving.serving.model_request_processor import ModelRequestProcessor
from clearml_serving.serving.preprocess_service import BasePreprocessRequest


class GzipRequest(Request):
    """Request subclass that transparently decompresses gzip-encoded bodies."""

    async def body(self) -> bytes:
        if not hasattr(self, "_body"):
            raw_body = await super().body()
            if "gzip" in self.headers.getlist("Content-Encoding"):
                raw_body = gzip.decompress(raw_body)
            self._body = raw_body
        return self._body


class GzipRoute(APIRoute):
    """Route class that swaps the incoming Request for a GzipRequest."""

    def get_route_handler(self) -> Callable:
        default_handler = super().get_route_handler()

        async def gzip_aware_handler(request: Request) -> Response:
            return await default_handler(GzipRequest(request.scope, request.receive))

        return gzip_aware_handler


# process Lock, so that we can have only a single process doing the model reloading at a time
singleton_sync_lock = Lock()

serving_service_task_id = os.environ.get("CLEARML_SERVING_TASK_ID", None)

# model sync polling frequency (minutes), override-able from the environment
model_sync_frequency_secs = 5
try:
    model_sync_frequency_secs = float(os.environ.get("CLEARML_SERVING_POLL_FREQ", model_sync_frequency_secs))
except (ValueError, TypeError):
    pass

# get the serving controller task
# noinspection PyProtectedMember
serving_task = ModelRequestProcessor._get_control_plane_task(task_id=serving_service_task_id)
# create a new serving instance (for visibility and monitoring)
instance_task = Task.init(
    project_name=serving_task.get_project_name(),
    task_name="{} - serve instance".format(serving_task.name),
    task_type="inference",
)
processor = None  # type: Optional[ModelRequestProcessor]
# preload modules into memory before forking
BasePreprocessRequest.load_modules()
# start FastAPI app
app = FastAPI(title="ClearML Serving Service", version=__version__, description="ClearML Service Service router")


@app.on_event("startup")
async def startup_event():
    """Create and launch the per-process ModelRequestProcessor."""
    global processor
    print("Starting up ModelRequestProcessor [pid={}] [service_id={}]".format(os.getpid(), serving_service_task_id))
    processor = ModelRequestProcessor(
        task_id=serving_service_task_id, update_lock_guard=singleton_sync_lock,
    )
    print("ModelRequestProcessor [id={}] loaded".format(processor.get_id()))
    processor.launch(poll_frequency_sec=model_sync_frequency_secs * 60)


router = APIRouter(
    prefix="/serve",
    tags=["models"],
    responses={404: {"description": "Model Serving Endpoint Not found"}},
    route_class=GzipRoute,  # mark-out to remove support for GZip content encoding
)


@router.post("/{model_id}")
async def serve_model(model_id: str, version: Optional[str] = None, request: Dict[Any, Any] = None):
    """Route an inference request to the matching model endpoint and return its result."""
    try:
        return processor.process_request(
            base_url=model_id,
            version=version,
            request_body=request
        )
    except Exception as ex:
        raise HTTPException(status_code=404, detail="Error processing request: {}".format(ex))


app.include_router(router)
import json
import os
from pathlib import Path
from time import sleep
from typing import Optional, Union, Dict, List
import itertools
import threading
from multiprocessing import Lock
from numpy.random import choice

from attr import attrib, attrs, asdict
from clearml import Task, Model
from clearml.storage.util import hash_dict
from .preprocess_service import ModelEndpoint, ModelMonitoring, BasePreprocessRequest


@attrs
class CanaryEP(object):
    """
    Canary (load-balancing) endpoint definition.

    Routes traffic between either a fixed list of endpoints (`load_endpoints`)
    or all endpoints matching a prefix (`load_endpoint_prefix`),
    according to `weights`.
    """
    endpoint = attrib(type=str)  # load balancer endpoint
    weights = attrib(type=list)  # list of weights (order should be matching fixed_endpoints or prefix)
    # list of endpoints to balance and route.
    # BUGFIX: use factory=list instead of default=[] - a literal mutable default
    # is shared between all CanaryEP instances
    load_endpoints = attrib(type=list, factory=list)
    load_endpoint_prefix = attrib(
        type=str, default=None)  # endpoint prefix to list
    # (any endpoint starting with this prefix will be listed, sorted lexicographically, or broken into /)

    def as_dict(self, remove_null_entries=False):
        # serialize to a plain dict; optionally drop None-valued fields
        if not remove_null_entries:
            return asdict(self)
        return {k: v for k, v in asdict(self).items() if v is not None}


class FastWriteCounter(object):
    """
    Fast increment/decrement counter based on itertools.count (C-level, GIL-safe).

    Note: value() consumes one tick from each internal counter, which keeps the
    reported difference consistent across successive calls.
    """
    def __init__(self):
        self._counter_inc = itertools.count()
        self._counter_dec = itertools.count()

    def inc(self):
        next(self._counter_inc)

    def dec(self):
        next(self._counter_dec)

    def value(self):
        return next(self._counter_inc) - next(self._counter_dec)


class ModelRequestProcessor(object):
    # system tag marking the serving control-plane Task
    _system_tag = 'serving-control-plane'

    def __init__(
            self,
            task_id: Optional[str] = None,
            update_lock_guard: Optional[Lock] = None,
            name: Optional[str] = None,
            project: Optional[str] = None,
            tags: Optional[List[str]] = None,
            force_create: bool = False,
    ) -> None:
        """
        :param task_id: Optional specify existing Task ID of the ServingService
        :param update_lock_guard: If provided use external (usually multi-process) lock guard for updates
        :param name: Optional name current serving service
        :param project: Optional select project for the current serving service
        :param tags: Optional add tags to the serving service
        :param force_create: force_create if provided, ignore task_id and create a new serving Task
        """
        self._task = self._create_task(name=name, project=project, tags=tags) \
            if force_create else self._get_control_plane_task(task_id=task_id, name=name, project=project, tags=tags)
        self._endpoints = dict()  # type: Dict[str, ModelEndpoint]
        self._model_monitoring = dict()  # type: Dict[str, ModelMonitoring]
        self._model_monitoring_versions = dict()  # type: Dict[str, Dict[int, str]]
        self._model_monitoring_endpoints = dict()  # type: Dict[str, ModelEndpoint]
        self._model_monitoring_update_request = True
        # Dict[base_serve_url, Dict[version, model_id]]
        self._canary_endpoints = dict()  # type: Dict[str, CanaryEP]
        self._canary_route = dict()  # type: Dict[str, dict]
        self._engine_processor_lookup = dict()  # type: Dict[str, BasePreprocessRequest]
        self._last_update_hash = None
        self._sync_daemon_thread = None
        # this is used for Fast locking mechanisms (so we do not actually need to use Locks)
        self._update_lock_flag = False
        self._request_processing_state = FastWriteCounter()
        self._update_lock_guard = update_lock_guard or threading.Lock()
        # serving server config
        self._configuration = {}
        self._instance_task = None
tags: Optional add tags to the serving service + :param force_create: force_create if provided, ignore task_id and create a new serving Task + """ + self._task = self._create_task(name=name, project=project, tags=tags) \ + if force_create else self._get_control_plane_task(task_id=task_id, name=name, project=project, tags=tags) + self._endpoints = dict() # type: Dict[str, ModelEndpoint] + self._model_monitoring = dict() # type: Dict[str, ModelMonitoring] + self._model_monitoring_versions = dict() # type: Dict[str, Dict[int, str]] + self._model_monitoring_endpoints = dict() # type: Dict[str, ModelEndpoint] + self._model_monitoring_update_request = True + # Dict[base_serve_url, Dict[version, model_id]] + self._canary_endpoints = dict() # type: Dict[str, CanaryEP] + self._canary_route = dict() # type: Dict[str, dict] + self._engine_processor_lookup = dict() # type: Dict[str, BasePreprocessRequest] + self._last_update_hash = None + self._sync_daemon_thread = None + # this is used for Fast locking mechanisms (so we do not actually need to use Locks) + self._update_lock_flag = False + self._request_processing_state = FastWriteCounter() + self._update_lock_guard = update_lock_guard or threading.Lock() + # serving server config + self._configuration = {} + self._instance_task = None + + def process_request(self, base_url: str, version: str, request_body: dict) -> dict: + """ + Process request coming in, + Raise Value error if url does not match existing endpoints + """ + self._request_processing_state.inc() + # check if we need to stall + if self._update_lock_flag: + self._request_processing_state.dec() + while self._update_lock_flag: + sleep(1) + # retry to process + return self.process_request(base_url=base_url, version=version, request_body=request_body) + try: + # normalize url and version + url = self._normalize_endpoint_url(base_url, version) + + # check canary + canary_url = self._process_canary(base_url=url) + if canary_url: + url = canary_url + + ep = 
self._endpoints.get(url, None) or self._model_monitoring_endpoints.get(url, None) + if not ep: + raise ValueError("Model inference endpoint '{}' not found".format(url)) + + processor = self._engine_processor_lookup.get(url) + if not processor: + processor_cls = BasePreprocessRequest.get_engine_cls(ep.engine_type) + processor = processor_cls( + model_endpoint=ep, task=self._task, server_config=dict(**self._configuration) + ) + self._engine_processor_lookup[url] = processor + + return_value = self._process_request(processor=processor, url=url, body=request_body) + except Exception: + self._request_processing_state.dec() + raise + + return return_value + + def _process_canary(self, base_url: str) -> Optional[dict]: + canary = self._canary_route.get(base_url) + if not canary: + return None + # random choice + draw = choice(canary['endpoints'], 1, p=canary['weights']) + # the new endpoint to use + return draw[0] + + def configure( + self, + external_serving_base_url: Optional[str] = None, + external_triton_grpc_server: Optional[str] = None, + ): + """ + Set ModelRequestProcessor configuration arguments. + + :param external_serving_base_url: Set the external base http endpoint for the serving service + This URL will be passed to user custom preprocess class, + allowing it to concatenate and combine multiple model requests into one + :param external_triton_grpc_server: set the external grpc tcp port of the Nvidia Triton clearml container. 
+ Used by the clearml triton engine class to send inference requests + """ + if external_serving_base_url is not None: + self._task.set_parameter( + name="General/serving_base_url", + value=str(external_serving_base_url), + value_type="str", + description="external base http endpoint for the serving service" + ) + if external_triton_grpc_server is not None: + self._task.set_parameter( + name="General/triton_grpc_server", + value=str(external_triton_grpc_server), + value_type="str", + description="external grpc tcp port of the Nvidia Triton ClearML container running" + ) + + def add_endpoint( + self, + endpoint: Union[ModelEndpoint, dict], + preprocess_code: Optional[str] = None, + model_name: Optional[str] = None, + model_project: Optional[str] = None, + model_tags: Optional[List[str]] = None, + model_published: Optional[bool] = None, + ) -> str: + """ + Return the unique name of the endpoint (endpoint + version) + Overwrite existing endpoint if already exists (outputs a warning) + + :param endpoint: New endpoint to register (overwrite existing endpoint if exists) + :param preprocess_code: If provided upload local code as artifact + :param model_name: If model-id not provided on, search based on model name + :param model_project: If model-id not provided on, search based on model project + :param model_tags: If model-id not provided on, search based on model tags + :param model_published: If model-id not provided on, search based on model published state + """ + if not isinstance(endpoint, ModelEndpoint): + endpoint = ModelEndpoint(**endpoint) + + # make sure we have everything configured + self._validate_model(endpoint) + + url = self._normalize_endpoint_url(endpoint.serving_url, endpoint.version) + if url in self._endpoints: + print("Warning: Model endpoint \'{}\' overwritten".format(url)) + + if not endpoint.model_id: + model_query = dict( + project_name=model_project, + model_name=model_name, + tags=model_tags, + only_published=bool(model_published), + 
include_archived=False, + ) + models = Model.query_models(max_results=2, **model_query) + if not models: + raise ValueError("Could not fine any Model to serve {}".format(model_query)) + if len(models) > 1: + print("Warning: Found multiple Models for \'{}\', selecting id={}".format(model_query, models[0].id)) + endpoint.model_id = models[0].id + + # upload as new artifact + if preprocess_code: + if not Path(preprocess_code).exists(): + raise ValueError("Preprocessing code \'{}\' could not be found".format(preprocess_code)) + preprocess_artifact_name = "py_code_{}".format(url.replace("/", "_")) + self._task.upload_artifact( + name=preprocess_artifact_name, artifact_object=Path(preprocess_code), wait_on_upload=True) + endpoint.preprocess_artifact = preprocess_artifact_name + + self._endpoints[url] = endpoint + return url + + def add_model_monitoring( + self, + monitoring: Union[ModelMonitoring, dict], + preprocess_code: Optional[str] = None, + ) -> str: + """ + Return the unique name of the endpoint (endpoint + version) + Overwrite existing endpoint if already exists (outputs a warning) + + :param monitoring: Model endpoint monitor (overwrite existing endpoint if exists) + :param preprocess_code: If provided upload local code as artifact + :return: Unique model monitoring ID (base_model_url) + """ + if not isinstance(monitoring, ModelMonitoring): + monitoring = ModelMonitoring(**monitoring) + + # make sure we have everything configured + self._validate_model(monitoring) + + name = monitoring.base_serving_url + if name in self._model_monitoring: + print("Warning: Model monitoring \'{}\' overwritten".format(name)) + + # upload as new artifact + if preprocess_code: + if not Path(preprocess_code).exists(): + raise ValueError("Preprocessing code \'{}\' could not be found".format(preprocess_code)) + preprocess_artifact_name = "py_code_{}".format(name.replace("/", "_")) + self._task.upload_artifact( + name=preprocess_artifact_name, artifact_object=Path(preprocess_code), 
wait_on_upload=True) + monitoring.preprocess_artifact = preprocess_artifact_name + + self._model_monitoring[name] = monitoring + return name + + def remove_model_monitoring(self, model_base_url: str) -> bool: + """ + Remove model monitoring, use base_model_url as unique identifier + """ + if model_base_url not in self._model_monitoring: + return False + self._model_monitoring.pop(model_base_url, None) + return True + + def remove_endpoint(self, endpoint_url: str, version: Optional[str] = None) -> bool: + """ + Remove specific model endpoint, use base_model_url as unique identifier + """ + endpoint_url = self._normalize_endpoint_url(endpoint_url, version) + if endpoint_url not in self._endpoints: + return False + self._endpoints.pop(endpoint_url, None) + return True + + def add_canary_endpoint( + self, + canary: Union[CanaryEP, dict], + ) -> str: + """ + Return the unique name of the endpoint (endpoint + version) + Overwrite existing endpoint if already exists (outputs a warning) + + :param canary: Canary endpoint router (overwrite existing endpoint if exists) + :return: Unique canary ID (base_model_url) + """ + if not isinstance(canary, CanaryEP): + canary = CanaryEP(**canary) + if canary.load_endpoints and canary.load_endpoint_prefix: + raise ValueError( + "Could not add canary endpoint with both " + "prefix ({}) and fixed set of endpoints ({})".format( + canary.load_endpoints, canary.load_endpoint_prefix)) + name = canary.endpoint + if name in self._canary_endpoints: + print("Warning: Model monitoring \'{}\' overwritten".format(name)) + + self._canary_endpoints[name] = canary + return name + + def remove_canary_endpoint(self, endpoint_url: str) -> bool: + """ + Remove specific canary model endpoint, use base_model_url as unique identifier + """ + if endpoint_url not in self._canary_endpoints: + return False + self._canary_endpoints.pop(endpoint_url, None) + return True + + def deserialize(self, task: Task = None, prefetch_artifacts=False, skip_sync=False) -> 
bool: + """ + Restore ModelRequestProcessor state from Task + return True if actually needed serialization, False nothing changed + :param task: Load data from Task + :param prefetch_artifacts: If True prefetch artifacts requested by the endpoints + :param skip_sync: If True do not update the canary/monitoring state + """ + if not task: + task = self._task + configuration = task.get_parameters_as_dict().get("General") or {} + endpoints = task.get_configuration_object_as_dict(name='endpoints') or {} + canary_ep = task.get_configuration_object_as_dict(name='canary') or {} + model_monitoring = task.get_configuration_object_as_dict(name='model_monitoring') or {} + hashed_conf = hash_dict(dict(endpoints=endpoints, canary_ep=canary_ep, model_monitoring=model_monitoring)) + if self._last_update_hash == hashed_conf and not self._model_monitoring_update_request: + return False + print("Info: syncing model endpoint configuration, state hash={}".format(hashed_conf)) + self._last_update_hash = hashed_conf + + endpoints = { + k: ModelEndpoint(**{i: j for i, j in v.items() if hasattr(ModelEndpoint.__attrs_attrs__, i)}) + for k, v in endpoints.items() + } + model_monitoring = { + k: ModelMonitoring(**{i: j for i, j in v.items() if hasattr(ModelMonitoring.__attrs_attrs__, i)}) + for k, v in model_monitoring.items() + } + canary_endpoints = { + k: CanaryEP(**{i: j for i, j in v.items() if hasattr(CanaryEP.__attrs_attrs__, i)}) + for k, v in canary_ep.items() + } + + # if there is no need to sync Canary and Models we can just leave + if skip_sync: + self._endpoints = endpoints + self._model_monitoring = model_monitoring + self._canary_endpoints = canary_endpoints + self._configuration = configuration + return True + + # make sure we only have one stall request at any given moment + with self._update_lock_guard: + # download artifacts + # todo: separate into two, download before lock, and overwrite inside lock + if prefetch_artifacts: + for item in list(endpoints.values()) + 
list(model_monitoring.values()): + if item.preprocess_artifact: + # noinspection PyBroadException + try: + self._task.artifacts[item.preprocess_artifact].get_local_copy( + extract_archive=True, + ) + except Exception: + pass + + # stall all requests + self._update_lock_flag = True + # wait until we have no request processed + while self._request_processing_state.value() != 0: + sleep(1) + + self._endpoints = endpoints + self._model_monitoring = model_monitoring + self._canary_endpoints = canary_endpoints + self._configuration = configuration + + # if we have models we need to sync, now is the time + self._sync_monitored_models() + + self._update_canary_lookup() + + # release stall lock + self._update_lock_flag = False + + return True + + def serialize(self, task: Optional[Task] = None) -> None: + """ + Store ModelRequestProcessor state into Task + """ + if not task: + task = self._task + config_dict = {k: v.as_dict(remove_null_entries=True) for k, v in self._endpoints.items()} + task.set_configuration_object(name='endpoints', config_dict=config_dict) + config_dict = {k: v.as_dict(remove_null_entries=True) for k, v in self._canary_endpoints.items()} + task.set_configuration_object(name='canary', config_dict=config_dict) + config_dict = {k: v.as_dict(remove_null_entries=True) for k, v in self._model_monitoring.items()} + task.set_configuration_object(name='model_monitoring', config_dict=config_dict) + + def _update_canary_lookup(self): + canary_route = {} + for k, v in self._canary_endpoints.items(): + if v.load_endpoint_prefix and v.load_endpoints: + print("Warning: Canary has both prefix and fixed endpoints, ignoring canary endpoint") + continue + if v.load_endpoints: + if len(v.load_endpoints) != len(v.weights): + print("Warning: Canary \'{}\' weights [{}] do not match number of endpoints [{}], skipping!".format( + k, v.weights, v.load_endpoints)) + continue + endpoints = [] + weights = [] + for w, ep in zip(v.weights, v.load_endpoints): + if ep not in 
self._endpoints and ep not in self._model_monitoring_endpoints: + print("Warning: Canary \'{}\' endpoint \'{}\' could not be found, skipping".format(k, ep)) + continue + endpoints.append(ep) + weights.append(float(w)) + # normalize weights + sum_weights = sum(weights) + weights = [w/sum_weights for w in weights] + canary_route[k] = dict(endpoints=endpoints, weights=weights) + elif v.load_endpoint_prefix: + endpoints = [ep for ep in list(self._endpoints.keys()) + list(self._model_monitoring_endpoints.keys()) + if str(ep).startswith(v.load_endpoint_prefix)] + endpoints = sorted( + endpoints, + reverse=True, + key=lambda x: '{}/{:0>9}'.format('/'.join(x.split('/')[:-1]), x.split('/')[-1]) if '/' in x else x + ) + endpoints = endpoints[:len(v.weights)] + weights = v.weights[:len(endpoints)] + # normalize weights + sum_weights = sum(weights) + weights = [w/sum_weights for w in weights] + canary_route[k] = dict(endpoints=endpoints, weights=weights) + self._report_text( + "Info: Canary endpoint \'{}\' selected [{}]".format(k, canary_route[k]) + ) + + # update back + self._canary_route = canary_route + + def _sync_monitored_models(self, force: bool = False) -> bool: + if not force and not self._model_monitoring_update_request: + return False + dirty = False + + for serving_base_url, versions_model_id_dict in self._model_monitoring_versions.items(): + # find existing endpoint versions + for ep_base_url in list(self._model_monitoring_endpoints.keys()): + # skip over endpoints that are not our own + if not ep_base_url.startswith(serving_base_url+"/"): + continue + # find endpoint version + _, version = ep_base_url.split("/", 1) + if int(version) not in versions_model_id_dict: + # remove old endpoint + self._model_monitoring_endpoints.pop(ep_base_url, None) + dirty = True + continue + + # add new endpoint + for version, model_id in versions_model_id_dict.items(): + url = "{}/{}".format(serving_base_url, version) + if url in self._model_monitoring_endpoints: + continue + model 
= self._model_monitoring.get(serving_base_url) + if not model: + # this should never happen + continue + ep = ModelEndpoint( + engine_type=model.engine_type, + serving_url=serving_base_url, + model_id=model_id, + version=str(version), + preprocess_artifact=model.preprocess_artifact, + input_size=model.input_size, + input_type=model.input_type, + output_size=model.output_size, + output_type=model.output_type + ) + self._model_monitoring_endpoints[url] = ep + dirty = True + + # filter out old model monitoring endpoints + for ep_url in list(self._model_monitoring_endpoints.keys()): + if not any(True for url in self._model_monitoring_versions if ep_url.startswith(url+"/")): + self._model_monitoring_endpoints.pop(ep_url, None) + dirty = True + + # reset flag + self._model_monitoring_update_request = False + + if dirty: + config_dict = {k: v.as_dict(remove_null_entries=True) for k, v in self._model_monitoring_endpoints.items()} + self._task.set_configuration_object(name='model_monitoring_eps', config_dict=config_dict) + + return dirty + + def _update_monitored_models(self): + for model in self._model_monitoring.values(): + current_served_models = self._model_monitoring_versions.get(model.base_serving_url, {}) + # To Do: sort by updated time ? 
+ models = Model.query_models( + project_name=model.monitor_project or None, + model_name=model.monitor_name or None, + tags=model.monitor_tags or None, + only_published=model.only_published, + max_results=model.max_versions, + include_archived=False, + ) + + # check what we already have: + current_model_id_version_lookup = dict( + zip(list(current_served_models.values()), list(current_served_models.keys())) + ) + versions = sorted(current_served_models.keys(), reverse=True) + + # notice, most updated model first + # first select only the new models + model_ids = [m.id for m in models] + + # we want last updated model to be last (so it gets the highest version number) + max_v = 1 + (versions[0] if versions else 0) + versions_model_ids = [] + for m_id in reversed(model_ids): + v = current_model_id_version_lookup.get(m_id) + if v is None: + v = max_v + max_v += 1 + versions_model_ids.append((v, m_id)) + + # remove extra entries (old models) + versions_model_ids_dict = dict(versions_model_ids[:model.max_versions]) + + # mark dirty if something changed: + if versions_model_ids_dict != current_served_models: + self._model_monitoring_update_request = True + + # update model serving state + self._model_monitoring_versions[model.base_serving_url] = versions_model_ids_dict + + if not self._model_monitoring_update_request: + return False + + self._report_text("INFO: Monitored Models updated: {}".format( + json.dumps(self._model_monitoring_versions, indent=2)) + ) + return True + + def launch(self, poll_frequency_sec=300): + """ + Launch the background synchronization thread and monitoring thread + (updating runtime process based on changes on the Task, and monitoring model changes in the system) + :param poll_frequency_sec: Sync every X seconds (default 300 seconds) + """ + if self._sync_daemon_thread: + return + + # read state + self.deserialize(self._task, prefetch_artifacts=True) + # model monitoring sync + if self._update_monitored_models(): + # update endpoints + 
self.deserialize(self._task, prefetch_artifacts=True) + + # get the serving instance (for visibility and monitoring) + self._instance_task = Task.current_task() + + # start the background thread + with self._update_lock_guard: + if self._sync_daemon_thread: + return + self._sync_daemon_thread = threading.Thread( + target=self._sync_daemon, args=(poll_frequency_sec, ), daemon=True) + self._sync_daemon_thread.start() + # we return immediately + + def _sync_daemon(self, poll_frequency_sec=300): + """ + Background thread, syncing model changes into request service. + """ + poll_frequency_sec = float(poll_frequency_sec) + # force mark started on the main serving service task + self._task.mark_started(force=True) + self._report_text("Launching - configuration sync every {} sec".format(poll_frequency_sec)) + cleanup = False + self._update_serving_plot() + while True: + try: + # this should be the only place where we call deserialize + self._task.reload() + if self.deserialize(self._task): + self._report_text("New configuration updated") + # mark clean up for next round + cleanup = True + # model monitoring sync + if self._update_monitored_models(): + self._report_text("Model monitoring synced") + # update endpoints + self.deserialize(self._task) + # mark clean up for next round + cleanup = True + # update serving layout plot + if cleanup: + self._update_serving_plot() + except Exception as ex: + print("Exception occurred in monitoring thread: {}".format(ex)) + sleep(poll_frequency_sec) + try: + # we assume that by now all old deleted endpoints requests already returned + if cleanup: + cleanup = False + for k in list(self._engine_processor_lookup.keys()): + if k not in self._endpoints: + # atomic + self._engine_processor_lookup.pop(k, None) + except Exception as ex: + print("Exception occurred in monitoring thread: {}".format(ex)) + + def get_id(self) -> str: + return self._task.id + + def get_endpoints(self) -> Dict[str, ModelEndpoint]: + endpoints = 
dict(**self._endpoints) + endpoints.update(**self._model_monitoring_endpoints) + return endpoints + + def get_synced_endpoints(self) -> Dict[str, ModelEndpoint]: + self._task.reload() + _endpoints = self._task.get_configuration_object_as_dict(name='endpoints') or {} + _monitor_endpoints = self._task.get_configuration_object_as_dict(name='model_monitoring_eps') or {} + endpoints = { + k: ModelEndpoint(**{i: j for i, j in v.items() if hasattr(ModelEndpoint.__attrs_attrs__, i)}) + for k, v in _endpoints.items()} + endpoints.update({ + k: ModelEndpoint(**{i: j for i, j in v.items() if hasattr(ModelEndpoint.__attrs_attrs__, i)}) + for k, v in _monitor_endpoints.items() + }) + return endpoints + + def get_canary_endpoints(self) -> dict: + return self._canary_endpoints + + def get_model_monitoring(self) -> dict: + return self._model_monitoring + + def _get_instance_id(self) -> Optional[str]: + return self._instance_task.id if self._instance_task else None + + def _report_text(self, text) -> Optional[str]: + return self._task.get_logger().report_text("Instance [{}, pid={}]: {}".format( + self._get_instance_id(), os.getpid(), text)) + + def _update_serving_plot(self) -> None: + """ + Update the endpoint serving graph on the serving instance Task + """ + if not self._instance_task: + return + + # Generate configuration table and details + endpoints = list(self._endpoints.values()) + list(self._model_monitoring_endpoints.values()) + if not endpoints: + # clear plot if we had any + return + + endpoints = [e.as_dict() for e in endpoints] + table_values = [list(endpoints[0].keys())] + table_values += [[e[c] or "" for c in table_values[0]] for e in endpoints] + self._instance_task.get_logger().report_table( + title='Serving Endpoint Configuration', series='Details', iteration=0, table_plot=table_values, + extra_layout={"title": "Model Endpoints Details"}) + + # generate current endpoint view + sankey_node = dict( + label=[], + color=[], + customdata=[], + 
hovertemplate='%{customdata}', + hoverlabel={"align": "left"}, + ) + sankey_link = dict( + source=[], + target=[], + value=[], + hovertemplate='', + ) + # root + sankey_node['color'].append("mediumpurple") + sankey_node['label'].append('{}'.format('external')) + sankey_node['customdata'].append("") + + sankey_node_idx = {} + + # base_url = self._task._get_app_server() + '/projects/*/models/{model_id}/general' + + # draw all static endpoints + # noinspection PyProtectedMember + for i, ep in enumerate(endpoints): + serve_url = ep['serving_url'] + full_url = '{}/{}'.format(serve_url, ep['version'] or "") + sankey_node['color'].append("blue") + sankey_node['label'].append("/{}/".format(full_url.strip("/"))) + sankey_node['customdata'].append( + "model id: {}".format(ep['model_id']) + ) + sankey_link['source'].append(0) + sankey_link['target'].append(i + 1) + sankey_link['value'].append(1. / len(self._endpoints)) + sankey_node_idx[full_url] = i + 1 + + # draw all model monitoring + sankey_node['color'].append("mediumpurple") + sankey_node['label'].append('{}'.format('monitoring models')) + sankey_node['customdata'].append("") + monitoring_root_idx = len(sankey_node['customdata']) - 1 + + for i, m in enumerate(self._model_monitoring.values()): + serve_url = m.base_serving_url + sankey_node['color'].append("purple") + sankey_node['label'].append('{}'.format(serve_url)) + sankey_node['customdata'].append( + "project: {}
name: {}
tags: {}".format( + m.monitor_project or '', m.monitor_name or '', m.monitor_tags or '') + ) + sankey_link['source'].append(monitoring_root_idx) + sankey_link['target'].append(monitoring_root_idx + i + 1) + sankey_link['value'].append(1. / len(self._model_monitoring)) + + # add links to the current models + serve_url = serve_url.rstrip("/") + "/" + for k in sankey_node_idx: + if k.startswith(serve_url): + sankey_link['source'].append(monitoring_root_idx + i + 1) + sankey_link['target'].append(sankey_node_idx[k]) + sankey_link['value'].append(1.0 / m.max_versions) + + # add canary endpoints + # sankey_node['color'].append("mediumpurple") + # sankey_node['label'].append('{}'.format('Canary endpoints')) + # sankey_node['customdata'].append("") + canary_root_idx = len(sankey_node['customdata']) - 1 + + # sankey_link['source'].append(0) + # sankey_link['target'].append(canary_root_idx) + # sankey_link['value'].append(1.) + + for i, c in enumerate(self._canary_endpoints.values()): + serve_url = c.endpoint + sankey_node['color'].append("green") + sankey_node['label'].append('CANARY: /{}/'.format(serve_url.strip("/"))) + sankey_node['customdata'].append( + "outputs: {}".format( + c.load_endpoints or c.load_endpoint_prefix) + ) + sankey_link['source'].append(0) + sankey_link['target'].append(canary_root_idx + i + 1) + sankey_link['value'].append(1. 
/ len(self._canary_endpoints)) + + # add links to the current models + if serve_url not in self._canary_route: + continue + for ep, w in zip(self._canary_route[serve_url]['endpoints'], self._canary_route[serve_url]['weights']): + idx = sankey_node_idx.get(ep) + if idx is None: + continue + sankey_link['source'].append(canary_root_idx + i + 1) + sankey_link['target'].append(idx) + sankey_link['value'].append(w) + + # create the sankey graph + dag_flow = dict( + link=sankey_link, + node=sankey_node, + textfont=dict(color='rgba(0,0,0,255)', size=10), + type='sankey', + orientation='h' + ) + fig = dict(data=[dag_flow], layout={'xaxis': {'visible': False}, 'yaxis': {'visible': False}}) + + self._instance_task.get_logger().report_plotly( + title='Serving Endpoints Layout', series='', iteration=0, figure=fig) + + @staticmethod + def _process_request(processor: BasePreprocessRequest, url: str, body: dict) -> dict: + # todo: add some statistics + preprocessed = processor.preprocess(body) + processed = processor.process(data=preprocessed) + return processor.postprocess(data=processed) + + @classmethod + def list_control_plane_tasks( + cls, + task_id: Optional[str] = None, + name: Optional[str] = None, + project: Optional[str] = None, + tags: Optional[List[str]] = None + ) -> List[dict]: + + # noinspection PyProtectedMember + tasks = Task.query_tasks( + task_name=name or None, + project_name=project or None, + tags=tags or None, + additional_return_fields=["id", "name", "project", "tags"], + task_filter={'type': ['service'], + 'status': ["created", "in_progress"], + 'system_tags': [cls._system_tag]} + ) # type: List[dict] + if not tasks: + return [] + + for t in tasks: + # noinspection PyProtectedMember + t['project'] = Task._get_project_name(t['project']) + + return tasks + + @classmethod + def _get_control_plane_task( + cls, + task_id: Optional[str] = None, + name: Optional[str] = None, + project: Optional[str] = None, + tags: Optional[List[str]] = None, + 
disable_change_state: bool = False, + ) -> Task: + if task_id: + task = Task.get_task(task_id=task_id) + if not task: + raise ValueError("Could not find Control Task ID={}".format(task_id)) + task_status = task.status + if task_status not in ("created", "in_progress",): + if disable_change_state: + raise ValueError( + "Could Control Task ID={} status [{}] " + "is not valid (only 'draft', 'running' are supported)".format(task_id, task_status)) + else: + task.mark_started(force=True) + return task + + # noinspection PyProtectedMember + tasks = Task.query_tasks( + task_name=name or None, + project_name=project or None, + tags=tags or None, + task_filter={'type': ['service'], + 'status': ["created", "in_progress"], + 'system_tags': [cls._system_tag]} + ) + if not tasks: + raise ValueError("Could not find any valid Control Tasks") + + if len(tasks) > 1: + print("Warning: more than one valid Controller Tasks found, using Task ID={}".format(tasks[0])) + + return Task.get_task(task_id=tasks[0]) + + @classmethod + def _create_task( + cls, + name: Optional[str] = None, + project: Optional[str] = None, + tags: Optional[List[str]] = None + ) -> Task: + task = Task.create( + project_name=project or "DevOps", + task_name=name or "Serving Service", + task_type="service", + ) + task.set_system_tags([cls._system_tag]) + if tags: + task.set_tags(tags) + return task + + @classmethod + def _normalize_endpoint_url(cls, endpoint: str, version : Optional[str] = None) -> str: + return "{}/{}".format(endpoint.rstrip("/"), version or "").rstrip("/") + + @classmethod + def _validate_model(cls, endpoint: Union[ModelEndpoint, ModelMonitoring]) -> bool: + """ + Raise exception if validation fails, otherwise return True + """ + if endpoint.engine_type in ("triton", ): + # verify we have all the info we need + d = endpoint.as_dict() + missing = [ + k for k in [ + 'input_type', 'input_size', 'input_name', + 'output_type', 'output_size', 'output_name', + ] if not d.get(k) + ] + if not 
endpoint.auxiliary_cfg and missing: + raise ValueError("Triton engine requires input description - missing values in {}".format(missing)) + return True + diff --git a/clearml_serving/serving/preprocess_service.py b/clearml_serving/serving/preprocess_service.py new file mode 100644 index 0000000..b59bc80 --- /dev/null +++ b/clearml_serving/serving/preprocess_service.py @@ -0,0 +1,356 @@ +import numpy as np +from typing import Optional, Any, Callable, List + +from attr import attrib, attrs, asdict + +from clearml import Task, Model +from clearml.binding.artifacts import Artifacts +from clearml.storage.util import sha256sum + + +def _engine_validator(inst, attr, value): # noqa + if not BasePreprocessRequest.validate_engine_type(value): + raise TypeError("{} not supported engine type".format(value)) + + +def _matrix_type_validator(inst, attr, value): # noqa + if value and not np.dtype(value): + raise TypeError("{} not supported matrix type".format(value)) + + +@attrs +class ModelMonitoring(object): + base_serving_url = attrib(type=str) # serving point url prefix (example: "detect_cat") + monitor_project = attrib(type=str) # monitor model project (for model auto update) + monitor_name = attrib(type=str) # monitor model name (for model auto update, regexp selection) + monitor_tags = attrib(type=list) # monitor model tag (for model auto update) + engine_type = attrib(type=str, validator=_engine_validator) # engine type + only_published = attrib(type=bool, default=False) # only select published models + max_versions = attrib(type=int, default=None) # Maximum number of models to keep serving (latest X models) + input_size = attrib(type=list, default=None) # optional, model matrix size + input_type = attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type + input_name = attrib(type=str, default=None) # optional, layer name to push the input to + output_size = attrib(type=list, default=None) # optional, model matrix size + output_type = 
attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type + output_name = attrib(type=str, default=None) # optional, layer name to pull the results from + preprocess_artifact = attrib( + type=str, default=None) # optional artifact name storing the model preprocessing code + auxiliary_cfg = attrib(type=dict, default=None) # Auxiliary configuration (e.g. triton conf), Union[str, dict] + + def as_dict(self, remove_null_entries=False): + if not remove_null_entries: + return asdict(self) + return {k: v for k, v in asdict(self).items() if v is not None} + + +@attrs +class ModelEndpoint(object): + engine_type = attrib(type=str, validator=_engine_validator) # engine type + serving_url = attrib(type=str) # full serving point url (including version) example: "detect_cat/v1" + model_id = attrib(type=str) # list of model IDs to serve (order implies versions first is v1) + version = attrib(type=str, default="") # key (version string), default no version + preprocess_artifact = attrib( + type=str, default=None) # optional artifact name storing the model preprocessing code + input_size = attrib(type=list, default=None) # optional, model matrix size + input_type = attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type + input_name = attrib(type=str, default=None) # optional, layer name to push the input to + output_size = attrib(type=list, default=None) # optional, model matrix size + output_type = attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type + output_name = attrib(type=str, default=None) # optional, layer name to pull the results from + auxiliary_cfg = attrib(type=dict, default=None) # Optional: Auxiliary configuration (e.g. 
triton conf), [str, dict] + + def as_dict(self, remove_null_entries=False): + if not remove_null_entries: + return asdict(self) + return {k: v for k, v in asdict(self).items() if v is not None} + + +class BasePreprocessRequest(object): + __preprocessing_lookup = {} + __preprocessing_modules = set() + + def __init__( + self, + model_endpoint: ModelEndpoint, + task: Task = None, + server_config: dict = None, + ): + """ + Notice this object is not be created per request, but once per Process + Make sure it is always thread-safe + """ + self.model_endpoint = model_endpoint + self._preprocess = None + self._model = None + self._server_config = server_config or {} + # load preprocessing code here + if self.model_endpoint.preprocess_artifact: + if not task or self.model_endpoint.preprocess_artifact not in task.artifacts: + print("Warning: could not find preprocessing artifact \'{}\' on Task id={}".format( + self.model_endpoint.preprocess_artifact, task.id)) + else: + try: + path = task.artifacts[self.model_endpoint.preprocess_artifact].get_local_copy() + # check file content hash, should only happens once?! 
+ # noinspection PyProtectedMember + file_hash, _ = sha256sum(path, block_size=Artifacts._hash_block_size) + if file_hash != task.artifacts[self.model_endpoint.preprocess_artifact].hash: + print("INFO: re-downloading artifact '{}' hash changed".format( + self.model_endpoint.preprocess_artifact)) + path = task.artifacts[self.model_endpoint.preprocess_artifact].get_local_copy( + extract_archive=True, + force_download=True, + ) + else: + # extract zip if we need to, otherwise it will be the same + path = task.artifacts[self.model_endpoint.preprocess_artifact].get_local_copy( + extract_archive=True, + ) + + import importlib.util + spec = importlib.util.spec_from_file_location("Preprocess", path) + _preprocess = importlib.util.module_from_spec(spec) + spec.loader.exec_module(_preprocess) + self._preprocess = _preprocess.Preprocess() # noqa + self._preprocess.serving_config = server_config or {} + if callable(getattr(self._preprocess, 'load', None)): + self._model = self._preprocess.load(self._get_local_model_file()) + except Exception as ex: + print("Warning: Failed loading preprocess code for \'{}\': {}".format( + self.model_endpoint.preprocess_artifact, ex)) + + def preprocess(self, request): + # type: (dict) -> Optional[Any] + """ + Raise exception to report an error + Return value will be passed to serving engine + """ + if self._preprocess is not None: + return self._preprocess.preprocess(request) + return request + + def postprocess(self, data): + # type: (Any) -> Optional[dict] + """ + Raise exception to report an error + Return value will be passed to serving engine + """ + if self._preprocess is not None: + return self._preprocess.postprocess(data) + return data + + def process(self, data: Any) -> Any: + """ + The actual processing function. 
Can be send to external service + """ + pass + + def _get_local_model_file(self): + model_repo_object = Model(model_id=self.model_endpoint.model_id) + return model_repo_object.get_local_copy() + + @classmethod + def validate_engine_type(cls, engine: str) -> bool: + return engine in cls.__preprocessing_lookup + + @classmethod + def get_engine_cls(cls, engine: str) -> Callable: + return cls.__preprocessing_lookup.get(engine) + + @staticmethod + def register_engine(engine_name: str, modules: Optional[List[str]] = None) -> Callable: + """ + A decorator to register an annotation type name for classes deriving from Annotation + """ + def wrapper(cls): + cls.__preprocessing_lookup[engine_name] = cls + return cls + + if modules: + BasePreprocessRequest.__preprocessing_modules |= set(modules) + + return wrapper + + @staticmethod + def load_modules() -> None: + for m in BasePreprocessRequest.__preprocessing_modules: + try: + # silently fail + import importlib + importlib.import_module(m) + except (ImportError, TypeError): + pass + + +@BasePreprocessRequest.register_engine("triton", modules=["grpc", "tritonclient"]) +class TritonPreprocessRequest(BasePreprocessRequest): + _content_lookup = { + np.uint8: 'uint_contents', + np.int8: 'int_contents', + np.int64: 'int64_contents', + np.uint64: 'uint64_contents', + np.int: 'int_contents', + np.uint: 'uint_contents', + np.bool: 'bool_contents', + np.float32: 'fp32_contents', + np.float64: 'fp64_contents', + } + _ext_grpc = None + _ext_np_to_triton_dtype = None + _ext_service_pb2 = None + _ext_service_pb2_grpc = None + + def __init__(self, model_endpoint: ModelEndpoint, task: Task = None, server_config: dict = None): + super(TritonPreprocessRequest, self).__init__( + model_endpoint=model_endpoint, task=task, server_config=server_config) + + # load Triton Module + if self._ext_grpc is None: + import grpc + self._ext_grpc = grpc + + if self._ext_np_to_triton_dtype is None: + from tritonclient.utils import np_to_triton_dtype + 
self._ext_np_to_triton_dtype = np_to_triton_dtype + + if self._ext_service_pb2 is None: + from tritonclient.grpc import service_pb2, service_pb2_grpc + self._ext_service_pb2 = service_pb2 + self._ext_service_pb2_grpc = service_pb2_grpc + + def process(self, data: Any) -> Any: + """ + The actual processing function. + Detect gRPC server and send the request to it + """ + # allow to override bt preprocessing class + if self._preprocess is not None and getattr(self._preprocess, "process", None): + return self._preprocess.process(data) + + # Create gRPC stub for communicating with the server + triton_server_address = self._server_config.get("triton_grpc_server") + if not triton_server_address: + raise ValueError("External Triton gRPC server is not configured!") + try: + channel = self._ext_grpc.insecure_channel(triton_server_address) + grpc_stub = self._ext_service_pb2_grpc.GRPCInferenceServiceStub(channel) + except Exception as ex: + raise ValueError("External Triton gRPC server misconfigured [{}]: {}".format(triton_server_address, ex)) + + # Generate the request + request = self._ext_service_pb2.ModelInferRequest() + request.model_name = "{}/{}".format(self.model_endpoint.serving_url, self.model_endpoint.version).strip("/") + # we do not use the Triton model versions, we just assume a single version per endpoint + request.model_version = "1" + + # take the input data + input_data = np.array(data, dtype=self.model_endpoint.input_type) + + # Populate the inputs in inference request + input0 = request.InferInputTensor() + input0.name = self.model_endpoint.input_name + input_dtype = np.dtype(self.model_endpoint.input_type).type + input0.datatype = self._ext_np_to_triton_dtype(input_dtype) + input0.shape.extend(self.model_endpoint.input_size) + + # to be inferred + input_func = self._content_lookup.get(input_dtype) + if not input_func: + raise ValueError("Input type nt supported {}".format(input_dtype)) + input_func = getattr(input0.contents, input_func) + input_func[:] = 
input_data.flatten() + + # push into request + request.inputs.extend([input0]) + + # Populate the outputs in the inference request + output0 = request.InferRequestedOutputTensor() + output0.name = self.model_endpoint.output_name + + request.outputs.extend([output0]) + response = grpc_stub.ModelInfer(request, compression=self._ext_grpc.Compression.Gzip) + + output_results = [] + index = 0 + for output in response.outputs: + shape = [] + for value in output.shape: + shape.append(value) + output_results.append( + np.frombuffer(response.raw_output_contents[index], dtype=self.model_endpoint.output_type)) + output_results[-1] = np.resize(output_results[-1], shape) + index += 1 + + # if we have a single matrix, return it as is + return output_results[0] if index == 1 else output_results + + +@BasePreprocessRequest.register_engine("sklearn", modules=["joblib", "sklearn"]) +class SKLearnPreprocessRequest(BasePreprocessRequest): + def __init__(self, model_endpoint: ModelEndpoint, task: Task = None, server_config: dict = None): + super(SKLearnPreprocessRequest, self).__init__( + model_endpoint=model_endpoint, task=task, server_config=server_config) + if self._model is None: + # get model + import joblib + self._model = joblib.load(filename=self._get_local_model_file()) + + def process(self, data: Any) -> Any: + """ + The actual processing function. 
+ We run the model in this context + """ + return self._model.predict(data) + + +@BasePreprocessRequest.register_engine("xgboost", modules=["xgboost"]) +class XGBoostPreprocessRequest(BasePreprocessRequest): + def __init__(self, model_endpoint: ModelEndpoint, task: Task = None, server_config: dict = None): + super(XGBoostPreprocessRequest, self).__init__( + model_endpoint=model_endpoint, task=task, server_config=server_config) + if self._model is None: + # get model + import xgboost + self._model = xgboost.Booster() + self._model.load_model(self._get_local_model_file()) + + def process(self, data: Any) -> Any: + """ + The actual processing function. + We run the model in this context + """ + return self._model.predict(data) + + +@BasePreprocessRequest.register_engine("lightgbm", modules=["lightgbm"]) +class LightGBMPreprocessRequest(BasePreprocessRequest): + def __init__(self, model_endpoint: ModelEndpoint, task: Task = None, server_config: dict = None): + super(LightGBMPreprocessRequest, self).__init__( + model_endpoint=model_endpoint, task=task, server_config=server_config) + if self._model is None: + # get model + import lightgbm + self._model = lightgbm.Booster(model_file=self._get_local_model_file()) + + def process(self, data: Any) -> Any: + """ + The actual processing function. + We run the model in this context + """ + return self._model.predict(data) + + +@BasePreprocessRequest.register_engine("custom") +class CustomPreprocessRequest(BasePreprocessRequest): + def __init__(self, model_endpoint: ModelEndpoint, task: Task = None, server_config: dict = None): + super(CustomPreprocessRequest, self).__init__( + model_endpoint=model_endpoint, task=task, server_config=server_config) + + def process(self, data: Any) -> Any: + """ + The actual processing function. 
+ We run the process in this context + """ + if self._preprocess is not None: + return self._preprocess.process(data) + return None diff --git a/clearml_serving/serving/requirements.txt b/clearml_serving/serving/requirements.txt new file mode 100644 index 0000000..281b0f8 --- /dev/null +++ b/clearml_serving/serving/requirements.txt @@ -0,0 +1,14 @@ +clearml >= 1.1.6 +attrs +fastapi[all] +uvicorn[standard] +gunicorn +pyzmq +asyncio +aiocache +tritonclient[grpc] +numpy +pandas +scikit-learn +grpcio +Pillow \ No newline at end of file diff --git a/clearml_serving/serving_service.py b/clearml_serving/serving_service.py deleted file mode 100644 index 419f7c1..0000000 --- a/clearml_serving/serving_service.py +++ /dev/null @@ -1,589 +0,0 @@ -import json -import shutil -from logging import getLogger -from pathlib import Path as Path3 -from time import time -from typing import Optional, Union, Dict, Sequence - -from attr import attrib, attrs, asdict -from pathlib2 import Path - -from clearml import Task, Model, InputModel - - -class ServingService(object): - _config_pbtxt_section = 'config.pbtxt' - _supported_serving_engines = ('triton', 'ovms', 'kfserving') - - @attrs - class EndPoint(object): - serving_url = attrib(type=str) - model_ids = attrib(type=list) - model_project = attrib(type=str) - model_name = attrib(type=str) - model_tags = attrib(type=list) - model_config_blob = attrib(type=str, default=None) - max_num_revisions = attrib(type=int, default=None) - versions = attrib(type=dict, default={}) - - def as_dict(self): - return asdict(self) - - def __init__(self, task_id=None, task_project=None, task_name=None, task=None, engine_type='triton'): - # type: (Optional[str], Optional[str], Optional[str], Optional[Task], Optional[str]) -> None - """ - :param task_id: Optional specify existing Task ID of the ServingService - :param task_project: Select the project where the new ServingService task will be created - :param task_name: Specify the Task name for the newly 
created ServingService - :param task: Optional pass existing ServingService Task object - :param engine_type: Specify the serving engine Type. Examples: triton, ovms, kfserving - """ - assert engine_type in self._supported_serving_engines - - if task: - self._task = task - elif task_id: - self._task = Task.get_task(task_id=task_id) - else: - # try to get a Task if we can find one - self._task = None - try: - # noinspection PyProtectedMember - if Task._query_tasks(project_name=task_project, task_name=task_name): - self._task = Task.get_task(project_name=task_project, task_name=task_name) - except ValueError: - pass - - if not self._task: - self._task = Task.create( - project_name=task_project, task_name=task_name, task_type=Task.TaskTypes.service, - repo="https://github.com/allegroai/clearml-serving.git", - branch="main", - commit="ad049c51c146e9b7852f87e2f040e97d88848a1f", - script="clearml_serving/service.py", - working_directory=".", - add_task_init_call=False, - ) - self._task.set_system_tags(list(self._task.get_system_tags()) + ['serving']) - - # self._current_serving_endpoints = {'an_enpoint_url': {1: 'model_id'}} - self._current_serving_endpoints = {} # type: Dict[str, Dict[int, str]] - # self._endpoints = {'an_enpoint_url': ServingService.EndPoint()} - self._endpoints = {} # type: Dict[str, ServingService.EndPoint] - self._engine_type = engine_type - self._dirty = False - self._last_update_step = None - # try to deserialize from Task - # noinspection PyBroadException - try: - self._deserialize() - except Exception: - pass - - def add_model_serving( - self, - serving_url, # type: str - model_ids=None, # type: Optional[Sequence[str]] - model_project=None, # type: Optional[str] - model_name=None, # type: Optional[str] - model_tags=None, # type: Optional[Sequence[str]] - config_file=None, # type: Optional[Union[Path, Path3, str]] - max_versions=1, # type: Optional[int] - ): - """ - Add new model serving endpoint, automatically published - - :param serving_url: - 
:param model_ids: - :param model_project: - :param model_name: - :param model_tags: - :param config_file: - :param max_versions: - :return: - """ - if not serving_url: - raise ValueError("serving_url is required") - - if model_tags and not isinstance(model_tags, (list, tuple)): - raise ValueError("model_tags must be a list of strings") - - # normalize endpoint url - serving_url = str(serving_url).strip('/') - - endpoint = self.EndPoint( - serving_url=serving_url, - model_ids=list(model_ids) if model_ids else None, - model_name=model_name, - model_project=model_project, - model_tags=model_tags, - max_num_revisions=max_versions or None, - versions={}, - model_config_blob='', - ) - # load config file - if config_file: - with open(str(config_file), 'rt') as f: - endpoint.model_config_blob = f.read() - else: - # Look for the config on the Model generated Task - found_models = Model.query_models(project_name=model_project, model_name=model_name, tags=model_tags) or [] - - selected_model = None - # find the first model with config.pbtxt configuration - # prefer published models - found_models = [m for m in found_models if m.published] + [m for m in found_models if not m.published] - for m in found_models: - task_id = m.task - task = Task.get_task(task_id=task_id) - config_pbtxt = task.get_configuration_object(self._config_pbtxt_section) - if config_pbtxt and str(config_pbtxt).strip(): - endpoint.model_config_blob = config_pbtxt - selected_model = m - break - - if not selected_model: - raise ValueError( - "Requested Model project={} name={} tags={} not found. 'config.pbtxt' could not be inferred. 
" - "please provide specific config.pbtxt definition.".format(model_project, model_name, model_tags)) - elif len(found_models) > 1: - getLogger('clearml-serving').warning( - "Found more than one Model, using model id={}".format(selected_model.id)) - - self._endpoints[serving_url] = endpoint - self._dirty = True - - def launch(self, queue_name='services', queue_id=None, force=False, verbose=True): - # type: (Optional[str], Optional[str], bool, bool) -> None - """ - Launch serving service on a remote machine using the specified queue - - :param queue_name: Queue name to launch the serving service control plane - :param queue_id: specify queue id (unique stand stable) instead of queue_name - :param force: if False check if service Task is already running before enqueuing - :param verbose: If True print progress to console - """ - # check if we are not already running - if not force and ((self._task.data.execution.queue and self._task.status == 'in_progress') - or self._task.status == 'queued'): - if verbose: - print('Serving service already running') - else: - if verbose: - print('Launching Serving service on {} queue'.format(queue_id or queue_name)) - self.update_endpoint_graph(force=True) - self.update_model_endpoint_state() - self.serialize() - self._task.flush(wait_for_uploads=True) - self._task.reset() - self._task.enqueue(task=self._task, queue_name=queue_name, queue_id=queue_id) - - def launch_engine(self, queue_name, queue_id=None, container=None, container_args=None, verbose=True): - # type: (Optional[str], Optional[str], Optional[str], Optional[str], bool) -> None - """ - Launch serving engine on a specific queue - - :param queue_name: Queue name to launch the engine service running the inference on. - :param queue_id: specify queue id (unique stand stable) instead of queue_name - :param container: Optional: specify serving engine container. - :param container_args: Optional: specify serving engine container arguments. 
- Notice these arguments will override any default container arguments! - :param verbose: If True print progress to console - """ - - # todo: add more engines - if self._engine_type == 'triton': - engine_type_container = "nvcr.io/nvidia/tritonserver:21.03-py3" - engine_type_args = "--ipc=host -p 8000:8000 -p 8001:8001 -p 8002:8002" - - # create the serving engine Task - engine_task = Task.create( - project_name=self._task.get_project_name(), - task_name="triton serving engine", - task_type=Task.TaskTypes.inference, - repo="https://github.com/allegroai/clearml-serving.git", - branch="main", - commit="ad049c51c146e9b7852f87e2f040e97d88848a1f", - script="clearml_serving/triton_helper.py", - working_directory=".", - docker=container or engine_type_container, - docker_args=container_args or engine_type_args, - argparse_args=[('serving_id', self._task.id), ], - add_task_init_call=False, - packages=[ - 'clearml', - 'azure-storage-blob>=2.0.1,<=2.1', - 'google-cloud-storage>=1.13.2', - 'boto3>=1.9', - ], - ) - if verbose: - print('Launching engine {} on queue {}'.format(self._engine_type, queue_id or queue_name)) - engine_task.enqueue(task=engine_task, queue_name=queue_name, queue_id=queue_id) - - def update_endpoint_graph(self, force=False): - # type: (bool) -> None - """ - Update the endpoint serving graph - - :param force: If True always update, otherwise skip if service was not changed since lat time - """ - if not force and not self._dirty: - return - - # Generate configuration table and details - table_values = [["Endpoint", "Model ID", "Model Project", "Model Name", "Model Tags", "Max Versions"]] - for endpoint in sorted(self._endpoints.keys()): - n = self._endpoints[endpoint] - table_values.append([ - str(n.serving_url or ''), - str(n.model_ids or ''), - str(n.model_project or ''), - str(n.model_name or ''), - str(n.model_tags or ''), - str(n.max_num_revisions or '') - ]) - self._task.get_logger().report_table( - title='Serving Endpoint Configuration', 
series='Details', iteration=0, table_plot=table_values, - extra_layout={"title": "Model Endpoints Details"}) - - # generate current endpoint view - sankey_node = dict( - label=[], - color=[], - customdata=[], - hovertemplate='%{customdata}', - hoverlabel={"align": "left"}, - ) - sankey_link = dict( - source=[], - target=[], - value=[], - hovertemplate='', - ) - # root - sankey_node['color'].append("mediumpurple") - sankey_node['label'].append('{}'.format('serving')) - sankey_node['customdata'].append("") - - # Generate table and details - table_values = [["Endpoint", "Version", "Model ID"]] - # noinspection PyProtectedMember - base_url = self._task._get_app_server() + '/projects/*/models/{model_id}/general' - for i, serve_url in enumerate(sorted(self._endpoints.keys())): - ep = self._endpoints[serve_url] - sankey_node['color'].append("blue") - sankey_node['label'].append('{}'.format(serve_url)) - sankey_node['customdata'].append( - "project: {}
name: {}
tags: {}".format( - ep.model_project or '', ep.model_name or '', ep.model_tags or '') - ) - sankey_link['source'].append(0) - sankey_link['target'].append(i + 1) - sankey_link['value'].append(1. / len(self._endpoints)) - - for v in sorted(self._current_serving_endpoints.get(serve_url, [])): - model_id = self._current_serving_endpoints[serve_url][v] - href = ' {} '.format(base_url.format(model_id=model_id), model_id) - table_values.append([str(serve_url), str(v), href]) - sankey_node['color'].append("lightblue") - sankey_node['label'].append('{}'.format(v)) - sankey_node['customdata'].append(model_id) - - sankey_link['source'].append(i + 1) - sankey_link['target'].append(len(sankey_node['color']) - 1) - sankey_link['value'].append(1. / len(self._current_serving_endpoints[serve_url])) - - # create the sankey graph - dag_flow = dict( - link=sankey_link, - node=sankey_node, - textfont=dict(color='rgba(0,0,0,255)', size=10), - type='sankey', - orientation='h' - ) - fig = dict(data=[dag_flow], layout={'xaxis': {'visible': False}, 'yaxis': {'visible': False}}) - - self._task.get_logger().report_plotly( - title='Model Serving Endpoints', series='', iteration=0, figure=fig) - - # report detailed table - self._task.get_logger().report_table( - title='Serving Endpoint', series='Details', iteration=0, table_plot=table_values, - extra_layout={"title": "Model Endpoints Details"}) - - self._dirty = False - - def update_model_endpoint_state(self): - # type: () -> bool - """ - Update model endpoint state from the model repository - - :return: True if endpoints were updated - """ - - for endpoint, node in self._endpoints.items(): - # model ID supersedes everything - if node.model_ids: - model_ids = node.model_ids - else: - # get list of models sorted by descending update time - models = Model.query_models( - project_name=node.model_project, - model_name=node.model_name, - tags=node.model_tags - ) - # prefer published models - model_ids = [m.id for m in models if m.published] + [m.id 
for m in models if not m.published] - - cur_endpoint = self._current_serving_endpoints.get(node.serving_url, {}) - cur_endpoint = {int(k): v for k, v in cur_endpoint.items() if v in model_ids} - cur_endpoint_m_ids = list(cur_endpoint.values()) - max_v = max(list(cur_endpoint.keys()) or [0]) - for i, m_id in enumerate(model_ids): - # only pick the latest in the history - if node.max_num_revisions and max_v >= node.max_num_revisions: - break - - if m_id in cur_endpoint_m_ids: - continue - max_v += 1 - cur_endpoint[max_v] = m_id - - # check if we need to update, - if self._current_serving_endpoints.get(node.serving_url) != cur_endpoint: - # set dirty flag - self._dirty = True - # store updated results - self._current_serving_endpoints[node.serving_url] = cur_endpoint - - return self._dirty - - def stats(self): - pass - - def get_endpoints(self): - # type: () -> Dict[str, ServingService.EndPoint] - """ - return the internal endpoints configuration - - :return: dict where the keys is the endpoint url and the value is the endpoint configuration - """ - return self._endpoints - - def get_endpoint_version_model_id(self, serving_url): - # type: (str) -> Dict[int, str] - """ - Return dict with model versions and model id for the specific serving url - If serving url is not found, return None - - :param serving_url: sering url string - - :return: dictionary keys are the versions (integers) and values are the model IDs (str) - """ - return self._current_serving_endpoints.get(serving_url) or {} - - def _serialize(self): - configuration = dict() - for name, ep in self._endpoints.items(): - # noinspection PyProtectedMember - self._task.set_configuration_object( - name="model.{}".format(name), - description='Model Serving Configuration', - config_type='pbtxt', - config_text=ep.model_config_blob) - ep_conf = ep.as_dict() - ep_conf.pop('model_config_blob', None) - configuration['"{}"'.format(name)] = ep_conf - # noinspection PyProtectedMember - self._task._set_configuration( - 
config_dict=configuration, name='endpoints', - config_type='hocon', description='Serving Endpoints Configuration') - # set configuration of current served endpoints - # noinspection PyProtectedMember - self._task._set_configuration( - config_dict=self._current_serving_endpoints, name='serving_state', - config_type='hocon', description='Current Serving Endpoints State', - ) - serving = dict(engine=self._engine_type) - self._task.connect(serving, name='serving') - - def _deserialize(self): - # type: () -> bool - """ - deserialize internal state from Task backend - - :return: return True if new state a was updated. - """ - # update if the task was updated - if self._endpoints: - last_update = self._task.data.last_update - try: - # noinspection PyProtectedMember - if last_update == self._task._get_last_update(): - return True - except AttributeError: - # support old clearml packages - pass - - self._task.reload() - - # noinspection PyProtectedMember - configuration = self._task._get_configuration_dict(name='endpoints') - if not configuration: - return False - - self._endpoints = {} - self._current_serving_endpoints = {} - serving = dict(engine='') - task_parameters = self._task.get_parameters_as_dict() - serving.update(task_parameters.get('serving', {})) - self._engine_type = serving['engine'] - - for name, endpoint in configuration.items(): - ep = self.EndPoint(model_config_blob='', **endpoint) - ep.model_config_blob = self._task.get_configuration_object( - name="model.{}".format(ep.serving_url)) - self._endpoints[ep.serving_url] = ep - - # get configuration of current served endpoints - # noinspection PyProtectedMember - self._current_serving_endpoints = self._task._get_configuration_dict(name='serving_state') - - self._dirty = True - return True - - def update(self, force=False): - # type: (bool) -> bool - """ - Update internal endpoint state based on Task configuration and model repository - - :param force: if True force update - - :return: True if internal state 
updated. - """ - if not self._task: - return False - - # store current internal state - state_hash = self.__state_hash() - - if not self._deserialize(): - return False - - # check if current internal state changed - if not force and state_hash == self.__state_hash(): - print("Skipping update, nothing changed") - return False - - return self.update_model_endpoint_state() - - def get_id(self): - # type: () -> str - """ - Return the Serving Service Task ID - :return: Unique Task ID (str) - """ - return self._task.id - - def get_engine_type(self): - # type: () -> str - """ - return the engine type used ib the serving service - :return: engine type (str). example: triton, ovms, kfserving - """ - return self._engine_type - - def serialize(self, force=False): - # type: (bool) -> None - """ - Serialize current service state to the Task - - :param force: If True synchronize an aborted/completed Task - """ - if force and self._task.status not in (Task.TaskStatusEnum.created, Task.TaskStatusEnum.in_progress): - self._task.mark_started(force=True) - - self._serialize() - - def triton_model_service_update_step(self, model_repository_folder=None, verbose=True): - # type: (Optional[str], bool) -> None - - # check if something changed since last time - if not self.update(force=self._last_update_step is None): - return - - self._last_update_step = time() - - if not model_repository_folder: - model_repository_folder = '/models/' - - if verbose: - print('Updating local model folder: {}'.format(model_repository_folder)) - - for url, endpoint in self.get_endpoints().items(): - folder = Path(model_repository_folder) / url - folder.mkdir(parents=True, exist_ok=True) - with open((folder / 'config.pbtxt').as_posix(), 'wt') as f: - f.write(endpoint.model_config_blob) - - # download model versions - for version, model_id in self.get_endpoint_version_model_id(serving_url=url).items(): - model_folder = folder / str(version) - - model_folder.mkdir(parents=True, exist_ok=True) - model = None - # 
noinspection PyBroadException - try: - model = InputModel(model_id) - local_path = model.get_local_copy() - except Exception: - local_path = None - if not local_path: - print("Error retrieving model ID {} []".format(model_id, model.url if model else '')) - continue - - local_path = Path(local_path) - - if verbose: - print('Update model v{} in {}'.format(version, model_folder)) - - # if this is a folder copy every and delete the temp folder - if local_path.is_dir(): - # we assume we have a `tensorflow.savedmodel` folder - model_folder /= 'model.savedmodel' - model_folder.mkdir(parents=True, exist_ok=True) - # rename to old - old_folder = None - if model_folder.exists(): - old_folder = model_folder.parent / '.old.{}'.format(model_folder.name) - model_folder.replace(old_folder) - if verbose: - print('copy model into {}'.format(model_folder)) - shutil.copytree( - local_path.as_posix(), model_folder.as_posix(), symlinks=False, - ) - if old_folder: - shutil.rmtree(path=old_folder.as_posix()) - # delete temp folder - shutil.rmtree(local_path.as_posix()) - else: - # single file should be moved - target_path = model_folder / local_path.name - old_file = None - if target_path.exists(): - old_file = target_path.parent / '.old.{}'.format(target_path.name) - target_path.replace(old_file) - shutil.move(local_path.as_posix(), target_path.as_posix()) - if old_file: - old_file.unlink() - - def __state_hash(self): - # type: () -> int - """ - Return Hash of the internal state (use only for in process comparison - :return: hash int - """ - return hash(json.dumps( - [self._current_serving_endpoints, {k: v.as_dict() for k, v in self._endpoints.items()}], - sort_keys=True)) diff --git a/clearml_serving/triton_helper.py b/clearml_serving/triton_helper.py deleted file mode 100644 index c878541..0000000 --- a/clearml_serving/triton_helper.py +++ /dev/null @@ -1,219 +0,0 @@ -import re -import subprocess -from argparse import ArgumentParser -from time import time -from typing import Optional 
- -from pathlib2 import Path - -from clearml import Task, Logger -from clearml.backend_api.utils import get_http_session_with_retry -from clearml_serving.serving_service import ServingService - - -class TritonHelper(object): - _metric_line_parsing = r"(\w+){(gpu_uuid=\"[\w\W]*\",)?model=\"(\w+)\",\s*version=\"(\d+)\"}\s*([0-9.]*)" - _default_metrics_port = 8002 - - def __init__( - self, - args, # Any - task, # type: Task - serving_id, # type: str - metric_host=None, # type: Optional[str] - metric_port=None, # type: int - ): - # type: (...) -> None - self._http_session = get_http_session_with_retry() - self.args = dict(**args.__dict__) if args else {} - self.task = task - self.serving_id = serving_id - self.metric_host = metric_host or '0.0.0.0' - self.metric_port = metric_port or self._default_metrics_port - self._parse_metric = re.compile(self._metric_line_parsing) - self._timestamp = time() - print('String Triton Helper service\n{}\n'.format(self.args)) - - def report_metrics(self, remote_logger): - # type: (Optional[Logger]) -> bool - # iterations are seconds from start - iteration = int(time() - self._timestamp) - - report_msg = "reporting metrics: relative time {} sec".format(iteration) - self.task.get_logger().report_text(report_msg) - if remote_logger: - remote_logger.report_text(report_msg) - - # noinspection PyBroadException - try: - request = self._http_session.get('http://{}:{}/metrics'.format( - self.metric_host, self.metric_port)) - if not request.ok: - return False - content = request.content.decode().split('\n') - except Exception: - return False - - for line in content: - line = line.strip() - if not line or line.startswith('#'): - continue - # noinspection PyBroadException - try: - metric, gpu_uuid, variant, version, value = self._parse_metric.match(line).groups() - value = float(value) - except Exception: - continue - self.task.get_logger().report_scalar( - title=metric, - series='{}.v{}'.format(variant, version), - iteration=iteration, - 
value=value - ) - # on the remote logger we add our own Task ID (unique ID), - # to support multiple servers reporting to the same service controller - if remote_logger: - remote_logger.report_scalar( - title=metric, - series='{}.v{}.{}'.format(variant, version, self.task.id), - iteration=iteration, - value=value - ) - - def maintenance_daemon( - self, - local_model_repo='/models', # type: str - update_frequency_sec=60.0, # type: float - metric_frequency_sec=60.0 # type: float - ): - # type: (...) -> None - - Path(local_model_repo).mkdir(parents=True, exist_ok=True) - - a_service = ServingService(task_id=self.serving_id) - a_service.triton_model_service_update_step(model_repository_folder=local_model_repo) - - # noinspection PyProtectedMember - remote_logger = a_service._task.get_logger() - - # todo: log triton server outputs when running locally - - # we assume we can run the triton server - cmd = [ - 'tritonserver', - '--model-control-mode=poll', - '--model-repository={}'.format(local_model_repo), - '--repository-poll-secs={}'.format(update_frequency_sec), - '--metrics-port={}'.format(self._default_metrics_port), - '--allow-metrics=true', - '--allow-gpu-metrics=true', - ] - for k, v in self.args.items(): - if not v or not str(k).startswith('t_'): - continue - cmd.append('--{}={}'.format(k, v)) - - print('Starting server: {}'.format(cmd)) - try: - proc = subprocess.Popen(cmd) - except FileNotFoundError: - raise ValueError( - "Triton Server Engine (tritonserver) could not be found!\n" - "Verify you running inside the `nvcr.io/nvidia/tritonserver` docker container") - base_freq = min(update_frequency_sec, metric_frequency_sec) - metric_tic = update_tic = time() - while True: - try: - error_code = proc.wait(timeout=base_freq) - if error_code == 0: - print("triton-server process ended with error code {}".format(error_code)) - return - raise ValueError("triton-server process ended with error code {}".format(error_code)) - except subprocess.TimeoutExpired: - pass - pass 
- - # update models - if time() - update_tic > update_frequency_sec: - a_service.triton_model_service_update_step(model_repository_folder=local_model_repo) - update_tic = time() - - # update stats - if time() - metric_tic > metric_frequency_sec: - metric_tic = time() - self.report_metrics(remote_logger) - - -def main(): - title = 'clearml-serving - Nvidia Triton Engine Helper' - print(title) - parser = ArgumentParser(prog='clearml-serving', description=title) - parser.add_argument( - '--serving-id', default=None, type=str, required=True, - help='Specify main serving service Task ID') - parser.add_argument( - '--project', default='serving', type=str, - help='Optional specify project for the serving engine Task') - parser.add_argument( - '--name', default='nvidia-triton', type=str, - help='Optional specify task name for the serving engine Task') - parser.add_argument( - '--update-frequency', default=10, type=float, - help='Model update frequency in minutes') - parser.add_argument( - '--metric-frequency', default=1, type=float, - help='Metric reporting update frequency in minutes') - parser.add_argument( - '--t-http-port', type=str, help=' The port for the server to listen on for HTTP requests') - parser.add_argument( - '--t-http-thread-count', type=str, help=' Number of threads handling HTTP requests') - parser.add_argument( - '--t-allow-grpc', type=str, help=' Allow the server to listen for GRPC requests') - parser.add_argument( - '--t-grpc-port', type=str, help=' The port for the server to listen on for GRPC requests') - parser.add_argument( - '--t-grpc-infer-allocation-pool-size', type=str, - help=' The maximum number of inference request/response objects that remain ' - 'allocated for reuse. 
As long as the number of in-flight requests doesn\'t exceed ' - 'this value there will be no allocation/deallocation of request/response objects') - parser.add_argument( - '--t-pinned-memory-pool-byte-size', type=str, - help=' The total byte size that can be allocated as pinned system ' - 'memory. If GPU support is enabled, the server will allocate pinned ' - 'system memory to accelerate data transfer between host and devices ' - 'until it exceeds the specified byte size. This option will not affect ' - 'the allocation conducted by the backend frameworks. Default is 256 MB') - parser.add_argument( - '--t-cuda-memory-pool-byte-size', type=str, - help='<:> The total byte size that can be allocated as CUDA memory for ' - 'the GPU device. If GPU support is enabled, the server will allocate ' - 'CUDA memory to minimize data transfer between host and devices ' - 'until it exceeds the specified byte size. This option will not affect ' - 'the allocation conducted by the backend frameworks. The argument ' - 'should be 2 integers separated by colons in the format :. This option can be used multiple times, but only ' - 'once per GPU device. Subsequent uses will overwrite previous uses for ' - 'the same GPU device. Default is 64 MB') - parser.add_argument( - '--t-min-supported-compute-capability', type=str, - help=' The minimum supported CUDA compute capability. GPUs that ' - 'don\'t support this compute capability will not be used by the server') - parser.add_argument( - '--t-buffer-manager-thread-count', type=str, - help=' The number of threads used to accelerate copies and other' - 'operations required to manage input and output tensor contents.' 
- 'Default is 0') - - args = parser.parse_args() - task = Task.init(project_name=args.project, task_name=args.name, task_type=Task.TaskTypes.inference) - helper = TritonHelper(args, task, serving_id=args.serving_id) - # this function will never end - helper.maintenance_daemon( - local_model_repo='/models', - update_frequency_sec=args.update_frequency*60.0, - metric_frequency_sec=args.metric_frequency*60.0, - ) - - -if __name__ == '__main__': - main() diff --git a/clearml_serving/version.py b/clearml_serving/version.py index 80eb7f9..e4e49b3 100644 --- a/clearml_serving/version.py +++ b/clearml_serving/version.py @@ -1 +1 @@ -__version__ = '0.3.3' +__version__ = '0.9.0' diff --git a/docs/design_diagram.png b/docs/design_diagram.png new file mode 100644 index 0000000000000000000000000000000000000000..e75c0ec7f2d0756338d59362e1b9758e42e0e7d9 GIT binary patch literal 340024 zcmeFZWmptw`#wxaI7p}<0#X(N0xFD1hbT%&cZ+mNcVnS|C?H*u12c5D0n$BmNW;)Q zFwFlRcYnL?x=$R(`{n)ceBl@-@4Vu?&hvVy_(+-@LJz^i!y|wAKuQ@8k7NoD?}Q>L zF?i;d(j`GWJQlo%QWC0e`lzv!)o;|WSggV~VUuf)x1_FJu}FOIPUp(otC2FnLXoSl zsw(5=C3xk{m3wr1q+;35#GGW8jIj_$VS>{H1sVLF>YV|M9oM@PaOjlQN&V{9NKcObRyuKdMBt|Ke?U{^2J`r|t&K z-1u(>K-IW)>OZX!ZW*Z}@L0VbgkK>3FLnT5(k=17dD{ueGb}-rCnG-nHvtsv5` zTW=(@S?Ee3(yv;pJ%peTp1R;Q8_yK>c<=FQ_uW1fg(0>iug67Zou&OSjgj3EJ3FDZ z>LRGka9^mv-1+71wC7OsE|%c5f`^KeQ98B-R`80eo60e_Ost2CEx_+q;YS)1V2`%K zY7SScc3U_RgR}--Mr5?y#bVf_-W=8J$5p7p5JvlY{DC4f1)+uHz|wj#qGVL_-VnwVg14-PHHgPc!BxP z<@JAj=EF_nXXF>|bd7s$wZHF3#(T2AKBZpggCQ6>+nD(|dh>pPCJxdUk_qV9NjxWpIko+$eTk#?h>6Gk@ zO*f1z=woD5I+MgE^u0Fj9jLX6 zLX<2c>O>Dht(7jGNR4Jt%!Gc@GnuCpUKcO>_$lIwEUE;lUleWNQ&A5UK`*4aG!W4~ z*%lOXT2P$qq$l5Any9N|pf0o>6Xd@pJwe`(u~ygnNQf&9GxOxnse?!28Nd+oUTJs# z$r6!D7!DPhl+Gu5Ryt33j~IuEj8={uqKXvSc&~jF*lOd=1IOQXY<~foc5>=&`Az@3 zC+_ZdBnZXjoXgU49djm-nC$%yE*rv;e zyNpwAKTkQF95{&~d)fuUpN4_B{Ot^p`3^y4V@9DoE1W##rVC^mn~=(XOkyO^0&ndXJX>b^eMsfvZnxtMSJhUBkbETX87M~UhVznY*-MT~ zxmrbEi%;FZI{b|-$r^Xj@QU)gZjtR8bdQPJwZnW4HfKvs 
zTF;endykab=xL@Y&t|thJ389x)cdd!M_@-JuLe^u8FQT&19wHtHCH`P`wG+@*n7F< zGfkF`^BvBC=lFUxN7&_(M7=992lGkr2@X!*gW2dNU0)%=AdMtu-=n9l-j&an)+QSQ zZR-8-SNmcMhRf_G;z_4yYDvtNEY+(`zdakG=B8k{-S#z1a7@zOASoL{!*N+8 zDo0*;y`J#xifLz}O?xaT+{a0QXhopM=z1vwkEs7YD==d(ziBFVb zx_!3!Vt?IqXRzs7uCfbMKRsPQlzP{gsJTIFBiNa zt5MF+vIEqyk9Je zizT6DE$}cp-mgSx?Z@K&Pq^EHG{RveW6*WUc9QCEs~S0L%g3C>It7pKHR{d4!fSxx zzgZSS+qgW=zTIa?mS>Dm9x8|(m{!+hIwi0-?r9fy%jDIFZj;guA$jb>zT#(xW>s`kQWF8p;0t z^$q{{d4XtR@y|M`m)j`rQNCV6B9gLEUe%>fr||!#DB#1$9=*%a~32p znT{)=4kCY=6#>pJm!l>rpuCU0pwqqm3pu;Wwrbt7i`RC`Tt!1U^1wgIdnO zV=>(Yspncj_f?xjzhSQV?WHmAHV_nOf=I^Be%-*ba5+mFNFbodMk4$kcOYTZqX!DADKGhqWF%mSq+hl5Rxx zkZ!fNlJZHd>9WyRPxYk*dgfU$#Xn6%Y4h&*0Any(e`!LshIL@`lF^=Yk}SFDx;!D& zV;2>(&X;5QJ?X5sD72n+gcusM@_FMYA0*C#1*^QZmgxB9&sDynDt=`E^V%UY6^ZKa zNtdNPuzYnX2zgza=rzYn)ChMK>bvQPC6^NsH$;eLvMq*}t?-R0Gv}Q*NWLObkAaUJ z=`8YrXH=KmdNbtTj?ws8J@d}Z=<4eH+II*U!}$4g!pj@a@#v!n92lqL9XkptotG=2 z(KN+y-HKZd58BHIUb|1E-gR4nEh}SH3w80aDbJafZfnGN)+EWYRtJa^1vF$h%*CcN zT%Stp2!xs4EAj~3lQpIPoGiiMfdN+=M!Vh8%wKBS zg2`yLL1=n$4Ynz1pNEPQ+Z)tTnc^R^%bHMs^6qyj&9@aEKg^8B)lQTPbD5G zyX}>eRQpW=nJsa-TR!ld>fAWL+N>Xc9;FT3n;C8oaqQ17CsJXFp{*D!>MFkZeN#s!GqfU(L4NV}ft}8xLJ7 zJ}(-TBSud>tFz5=UQOx&-+oRx!hibGUlO@5tRPChkaOYe&rvc}m)uh^M^%YZ9&f!q z6&h($>_S@DFwXHW#s=JpwAFY)YYn+e^N~1kGfeFG&Kc!QPa6n^Cf4$e)kKDTTvQMJ zhQxE@P=T+(;T+zGh^dB*k`$V@!;F#-YaqF3?gSC3;m}i}UC70L>TS-^PH@o-`=bdI zkax;!0_GNC<{&9F*TNY1;q@sc32;5a3#tKv2^nTH!L<4>P^mYL>adtK_2=Q41pO6` z^Obw5yV@yF^OYsGA9XJm!nFNc((8av&w#E3t`xR%bJ` zHaKoB#B^_XZ4ut;dD@*!A$BeO8n|`tz>iUX_=Z3hb5|4ePrAx)C!e*+#OI>_oSIJ4 zw@nxRMs%-x1z0WnD^nOlqMD>{z}j8nb89G*n(c+IOYC*>_$J59#u_Es$Ok?zE)Hha zOqCEtGqp?g`q8L`uD5sABT+7Sm{pS0g~Tu>kzw-sR}`yiL{C7h)hgvGK=P9Z0nx-w zfO))Da=iGH@q_Rr*UJ=*d)ucMc8a1nkCd=jk&sJ4c1U;v2xz+*>m&k+Z(9vqhbg8= zw!tmGW|&eP;uAxz0;@AFX%N-OcCepuYpQ{+a)aW6Q`c?Jm6B-rMByQ#R}C5V4^!Yg zAxt78qUxjwt@VbX0l`E9)%Hg9Rwq*5t#-BI%4<+#E&Nhq@{hfZkSUsCD`Lmob9$kA zf!6|flS;x&*O@(01#ia`1ZJrjpq8qmvfXlSkz9LR?^vrv%h0t-G=xy{fNt?O3!N3? 
zy{^{wQPqvpBX)Hn2|DlJ+N6;P6yA|5D6OmRUonJmZaJM6JJ zx#t9*p)~J-#h-=`JQC*ykimS<*o^j{R?x;Ym8L7~`MGIjoTF4ZycEv2>IhhK&Jj}ErT-a|BA*UizpvO~5M}&PoWwBE`Zr6cpqUOT@K{o@F5T zuzmP^@|1hcnpg<^=M>M$5s;Z$BnY{3Thk6w{%LW*qh=X!*mJA7oFIQb?H@n@;Vb!C zNLl(WFCj71V35^oJ9kZ0vUf7~VY@Op{B8nD`$<|?k#2>CyT! zE*}VJ@QVw0VDU~(S>%s{*M~pql-UxQ+FR}Bx0J$WACst^+;>@?W+hpJ5QAd)+C z8@m^veI;MZi4*rTw5DT45^c#qHu`$aaOZ`g7(in9`HP0iH2o_?jd5wj69(qbU#k)q z48?j`kNUDz$buFd@NYJV^4pHyQTD057jo{l@z>W*c1i_y2!ECaiPJd_DI8~GEXS%` zdDbyGJs6L15Io!ay+Ut|d#+XZv(tZYZigbJDi;hoRg(lL|7t!?f)$m!=1B3A`H-X{ zeKs4L4%4bID$WPl2aDM8{`4h1mGE?B^vhV8(8Hmgr7)jRq|+G0jrsxyb#Ezsd~(S% ztG=wM#u9Mvdj|4!<|$HQVmY!Ca)}sKZO7M=tm_u`ri0gcYWE+1d|iglIgaKr7+*e0}?=^YHxz}Gnv&kGnV#1=>y5<$YjhYZ%Vs!@o!^<@_<*#l;~@+sm&kkdvl z8Hc}JN%Y>;S{^Dgo6)7emONh}7juhFpzcu*Oanm>sH-{$a3S>S5r=@-lV++t3?gO? zbGYY<;Plg=T8raI$|8V}|9Mz(XZQ+Oaox%|U9vwmLi`aKpWXNy5YDE8nQqdQE?Qld;*wadxfMEz=>iF=!ol$*T5v0p7?@b|v!FRHEGt zo2)^U`qH1aOYVPr$3{yUAL#w?N)R&Q6o`x+ zsuo4LwTrJ!&3<%fYjpcq6gJ?Z;qElihxOhaV%sc|bd4O^;~GChv>4CZX4CYV&X~Tf z0PKXTW~3i>35JlH(2KFw_S9G7_Bx!4vl~6b_;T2NKEVv2QX$s2ywOfpAfLQi)n4(Z z4F!+l4Kk0Q{^H1f6AewsmZfSxRXS z*bgS-x~(WPJp#a@*^HDziM|9|1g&Rh|44vOpuvG)9Fj0{&VLm!{zAV_s3Z+Lp&pyf zaOx;^FN|UeoM?wC5W@G0$osDZvKr%ZUEVrQ4LGXQ9kYWRLwQX+)^p5Z>x`m02I$gF zE}@hRx>X82`qzGOKyPA|3a?+d_H+mpPjg#SyeKPwIz=>_2>=v80V)v)RGByWgqpm-VqrDn50dP06ww_0s zTVU<1*z*CFEB%Bd@^IkPjG+pLXQ+p$8es?=TIdZjB>03!#-p|4hlGxOkwU@BXFubz`YHxEn>Gj6(Mx<#py% ze}{6(rp)^Q$ien3m&PW@$E`!)j*ot_a8%;SngC)9iHr{XDbS@#^fR^P$X`z%EPzE}_G5woY`$3Zglt415~UuVouNF=p`%}7*J;S4J6 z6@cHAH8J~EcSVPDu^s`3ZoNL)@OJqH_X?A@|InF6D+@9%vB|iB7f8IMNk2C79HncB z!wRmE?_WYzMjbm#K#I=OL(Vm}0r2AyknB{pS7w;d_E8#~Gg$8F{BZM#^;Y&8<+18rV68@H(>26?K>+;lo<@W|hJrX8hZp$MZ7l|v$$|{~7X7v95gH9$ zBLGU9jkb2vZ#V29%hae~W-^^ol;LW@5=CtQRh_KW=;y=x+!>{?cN|nuV_Gc+>{tdq z2TVQh)dgiA$YdbzZis_uWGV8s6Qe?>mEa1d!|`to4{$L zn=TVcU+ zWhf*uowPYQ)!WZ~w2Q+K8N9kdwL>F?wR>wHpp+GK)v+e1xPWRy3o9&Io_}nnQE7rY zo@2k?QK?T|m(|OuD>q^at-DV*UHL4@_jpLPJ2jt6N7Bu^Y{5hInZEq8r!RUjlPJK& 
z_e~CQ@)c5Qmc!CKNw)-p2}&Hezbvynkf*n=4^!tFsdg{&T=_llJGXm|#-i@!SvGC# zF=i!%r}o7K>4_xzndDBX)ClZiyk`H^N|zPO)@UAAUr`(am~n+z*`~Wy=2|ndNFP@~ zdG3aTKX#r^&bu8S9psWEaqx|>{%p`+f`TKDQdvmH#Xp}ly#1s({6mT&h*ov+#!78; zU`lDvA^}XQj?lNkp_;RkC67u7>KAr~0)f-yv20&*(gxrGSuXzCUVocw`{b$Hc~!Z1 zkF36O%4w)D(;T-Yii|{hg_ign%|;vOC(BhdBA4M+9lN8B3NYP@LN1-sw&L3xHf3X` zu_qA*B;@onp@^Ps$S^hO;t?c{@4SS``@lS-PtV>uzv{1>Y<6H`*y2i;;}Q}ZI<6@zvOj&JXQ=uA zxB$pJ9VEHvOIvMTVUKzl`>5Jdq@O>?|Ab5pl(`rYF~cC!FqkDG8>r;A*zb~ zB4W})t+GQn#%vH_AY|Q2B`PRhi}Nje_Xsch9L&=0UAGI#ILz?B2kIC+C%I)aI9jL*gw+A9C^=%ZTn5^T4TjE8zq z&o_!Fn()D}((S)iRAs`!B|KTFuN)yGhz?pa3_a;dIcWZ$%v#FFSw zrOmPguxM{i@Xxh zq+$|KXwn+%Lw;M$q6;rWHEy+vIQRsK#^qs+zyj6w)Ej|1$}4C8a-;AFMwI=id~}u^ zZGZBBuUL}Le1YuqeG=&($Wpqb@`29!9{Iud;B~WUQ^g7szzEnQZf5Uaa?p%2H8F{=0*$d7O+lAyo=$(&(W`B7KBU|DCQ5w|<7LVbgmk{HRropbCbA9fw&F)f zVP7<*Q*3^ENUERz8977L>(s$uxdv`rRigsq-gfd3+?4}wHK-8Ina-iW)SdhA zEmwJgzN@>&)3s{jD+8G@F<4UP?}3z}gOL-Iwg zOKBIBgDTc8MM@zfPC(3QESI;kO(0&q^Xe(6Qp?zz@U^doy+t`@TC%6DVNuysKA`Gk zo99A~>hZ=Ejv%KZ9?YXb>bX$6EFH$Q(&(nfsFY6Do`c+^HcN0;Z=03v5g-})$yfuk zeMQ2Ls`m2e74(b0xKMzOgp$4nb&2*5dn+LBFNGm$1*PGkt$)4bRO0oosS-2)*rz{U zy>-c}fP(X4#D<>0iEtE#I=A!)m;`0t2dm zt7csHk|SoJRs&L`KN%F%@WiZqw_8JuY3UjxYZCqCaav6O{bf}!kh}Nknmj) zcA42_F4Fyx(EUE1U@G9tPS98V{VV+Hu_xPNp{<~XT9x;p)OzSK2p~&Anl-Y}orWti zy|s~v=C(2L-iZVWEV0Z__x49d+9pcZa-Kf69R3d-g1pJe`NdwK=ATFA#wn&C$v0RkTwDb3AC0Ak<*`n)Ndp3 zUk9axPen5i{sO4|ZEJu3*PR<&=XpxJ=&*l&{pYWsN(4^BE$YI_zb(N({L9h_vVyH6 z{K?JJf1J9%Nfp4M5ta@8`!4>&0*23nA-Eso*&qBmykFivh2uz> zeS7=s@P1ybAFpoef+2J=<8lAQw)y2vzkd&lFn~$cuM3>{FIFrOT=)3{!ur{tlDR+D z{%@Brhz-#B60b=A=?DJhPB%M(Aqe~8m3#fNn7@4He~0(S!T8_d{b9@gcX)qXzyDd@ zA9mt@miLDZ{-5Rj;oANeHv9-G{uegjf{Ooz4gU)p{>;Dr7dHI=Eo_LXJt{D68h+2N zlKb%8)lvYli~yE1?H*EUji#ygi*v0UgJUh~>^{JAuj^n(3Qc53YZrM7+_y~kYD>39 zzM{QT_cl$iIOzg_)lAtEoFaBsMv#_$S#P~5FNyq{B;=23Clz)1W(%jy_ChyzpH7WO z31IlP6Kyn22Rwb^CJ~}k{ZAibM70cKua%#A_O>=@ zgW&i{;K_hBexSmb>r7%ECZt)>?;|wZ+E^OMvkIZ*>CmKN@)Dilh_l9YiWwgtANb@< 
z>?|Y80NIfG{PnZd6gFuX%*~^crFkjj$^@B@Xh5ez?!@wkhEB`E`F7v&*7xkj4E(mQ zy`oRl@dh-J`Or%FYMfZh#D;DVk_FTq1H^ijH>Wlfs8T3EsVLPackth(fHKKAn77*E zN<}MRIux+peWv+)C3^tCUxxtLzJz6;PQubDF&MNdS7asc2RXc#ir`GTU0 zb$^c9Tkm4Tgn|(k(Dcd#qsKM6$_*O`$_&=y(G~7nbD->BOwhDF0)z}rfUcVPVSelI zQIe?0++og%8=kw?dx?|_-XF3JKyk=7?#|Q4mq)i7GAV^Xanwf8WqbJ;12BytKwd1% z;Y&7?9^tbd?Aj4IUw;4jXPn>#oD@4E;)f^7vOzh)W-z}&B(#7)sphIHc`2#wSx&=# zZyeF^JCpPw5~k5f)c7rSHDrR@gX;NiztvJ;Vgq1#d=ZJCfO{QmX4vh!7&0-JhnH>@0We$&^$!Yh|JMu0#GC{Vt6cbNYhWFXuZzy}Aeon?zOI7-+6V6_WP z1)bGh@Bl1To8IFmpS}f3nBTauLrCzrz5I>ZxKMclT=uYO7)b)kN^Y$Al6{l61x@XB zu(iG-`!H;cq^uhBLGhdMEwxjrX6z(;oO+eTaozE`7IQ_CioTK*8(Nn3u9)#jk-@*FZQGl zWXLnYl#xCdCOMfU>d(?qHRIX=FfO~9L4+@^}|N4euWVLF|7KlvX&dUQ_L6;mzLEOS3 zbfP7S%Sx}(sX!_1zKQhn1)_fg(>%e!KGQSt>j2%)S5B81>en!^0Spa-6JR3Ebog9l z-T6j$ad)gk_bhF|kBH!(|11KmDRchI04wPQ-=mFPKp7t))#a;kql zFscVQ*iVF~ltN;1$SkR3yvB2+F_;Vo$g*B|dlz>WhgiFS9>Z#^vg8dN?^Pi|DV~2h zO|bLR;K0(c%?AOlXd*B>-@q4{fcgaSZ+qzv!1;bqbTb50p4NXpX8yV~ICm+BwHYeB zvWnOk4=FL&-Ut{uG4U=_4DKtEU?3aGVFJ8~llP;&y2*;2!v##5LCA51^-$q1_+TaG z$TRy`8?GyWlMIv&^F$gi#`4>Z>rN_dA0E&Kib>buq(}l=nj^%A%otBhu6J5kbc>m! 
zx-Vu&!(ZO{`b0q8-u%XzT8`?s+t!1V9gJQ)1D)N7Z!$=FFOkUU_kfu;Hlo^bdh|Xr zdtM3g1>LzPhOW6~(o*h8k3j>&&b)Q!*jX=W^*L4`(x4=4O`2l_rr^VDlVqK=;2G1h z)o{Cl2VBF$y*tZr980UH)MliT#{_#N$~ocVUEChBp1_G%TDt2%DJOU^PVd(h_dhyw z2PjxuXM7YrAK4E+!7X`HY$K30W_QPJuD(_!dOoXLrF*8`gNuHb89t#ebodM zpvs26%vXu;eqIn)65}+)0P^DWv#57?l|j9ntM9lFxa7Jrh&iSpjj(qiX&H&I z198A3_6xg*XaiTE76r7?iP5`>w&JM3@kzGJN-r2O<3MdV2_GqQ+U*x1Bi_&Uk=gO$ zctBYyMMxB|_RSJ#li0~!UNC)5K3>ueL}cFTN&)8-vhkOMOCigLrZw z#&RGxrs^v5C@0}oUD#f1ZAf3)q)Yc9kuO|1{S`yonPsIgqXA>P!w#zmc*i2AlvvAAc|QM9slm+?6ckU&_zF-@WGQ`gaPV7C4?=du{)z zMYjg$?PYWYFpu2ZWY5M6N<%0HOWqJvgS(R9RXZubp_(<>n5}w9f2x<}B73zgb$(ME ze*E?K3dKoD&o_f_Mt)6laGamCrD>I%CH?J}@FlOX;OIY+;a_(?~;ZvJ3?|h@YxQ>x^ zo(w1rlhTfo|35AO)AexF`Rqi_K@x*VR*&$z_W5DN64$^YFA1ZC7oZ|ZK7LB}LRinb zq$3bDsZQSUVUe`gsjCr_-hO%{Dro0aH#6Ni6_&QFLiAj-sas%MKOc+M*99Sk{|e?% zu(nR05dq{&rH{Y7A_#;(wt*q3SFOU;oS+d4Yu&L>tIC#!R>kRXl^}g5CWeRyAS-OS8Kk_H3Iq+WVO4a>-6ABa-FD)CMAcGhQ>OE0pk_PO6l!EOB#Ep>RXyfeSqpu9f1p|27Sr#lQDDg)}% zUGSUdKFr21RZO?%J~F|onua&U_UCDJ;+S(u?-rJK5cjZ4VZFe~_0D|KY?9{-)Gf*| z>r7-ku)hC%bfeLXCG5gD|18}1fGC` z=l!H?GC5F+kjm?%@5+ZIzh@w?7zSbJyF^YmVfn@$Z&~$r*8SR5=gNN005!G<^7XDm zpu~1Haw?R$(2(x|#0yUN9)>f6 zcaW)5ZI$hA}ox9G3)zYSIXTyP#HeSY$v(z zB!;mzc;c^BWExXD_~E3%6B%5G2d%4JsrSfwt=(F>&4%8j2>oSOa9%LnnIOq~H^zB& zbSTmH*j)(twj#K%({D9de{+1y2o{Js%z@rFE5W|;ece2twZC-)$W0|i) z4JSdt&i^r07hW2WmYiv%4dic@wet#cN_# zF@R8K%%PRb1!&mQoh=lEH-dInf#>X5e6RB8q1A--=zahhcRtX6wD=rtNW%bA?zT%e z&gF7Y;?kv-_w3Ux5z8?XCeeD<9q|I{H6G3kLN2*b%f4%zhYFdmt|sH(C#UDjFB|uO znUg%W`!&pAX(Gbu_{z>zUp?cz(8bz>h-&@Of~1)UpDR6;ZoOq&&~-I_J>tX3Br@0P zAicn22j}$$l99DKtnDliyBpO|)>T;z;$q`cw^^mkFold5{0P=5p>bU3D1<6cV9dN)@u~CiBCwZ2^OTx9Z`op_Ux^}b&FuQ zo*c6dWCdk_#?qbx$h(lKY`lVYJCy=w!wK)b5}>U8C}Nl~DEQg`9}eAh8gSddm)qjx z)KR+;w!an1*L?}zu_?I3B<5&hmkqEo5AKq7zI*o+zkNS_g5W0D`&3V%&WN3{-1CQQ$t}vXECJVDi~F(g6~6F5U7j zzQzYMgsCI&s#QxsB=v_~3zwJo#_rW&Z9rtoJ4U9xN|yCq3WO{tryQ~Bj=_Jsz`*HC z;maF#Uzg-sjwVwR4ry*!{qdG7;^Jp-s;wLX=`^@7_HszM4ug*A<;XeD^iLQUFcV z7PtL`7lFkLyQmj7@T?AS=y=sd4bgxj~(xX<5u@?dzL>zmL8 
zw@H8QAfyN|fxnnM($BQ(tkuWzi=c~0@Ne=Q|6JM+P^+j`eOzA#usAA>$G6UkF58){ zIxG4oHdfH_nredGa9ktkt-ctH!UDyG^-S~IQXt%@Fmwk1&Onu`oyLH0C5{kY3K~b? zx`zNxrpw9B3}cJ@KpAZ2@YP503jx!eV%X701>|e5yr=pgF>oc_k;lY?oIw3~-{h;5 zyU#1Vp|8{qy+BNhYZ{`#p`WI0OfaT zIifiQ%khhnC7MU6wcnwiyEt_y&c>10%OzkL*BqqbLf0UvnPlwl2+K^?|g zpnu^G!UFI_&yd+#TY3?2!d=$XfbSqm(e^z8>4z)tI~rn6mQi4Y^qLtMzr(g`2Hs+E zN#MLveI*cv0K{wy^^z^oKC_2@Yxw@u25{24ez20Eonz33K#z`MM~`<9SOt)Dx-=;X z0Xm>=K7)03e6JlAa6dIcP%`QcbbjxHwn6`b0#2e-x?xB?5V&3U1(e}YJgS{+U@cb9 z(_c?+pvt+By!Xix$je-Ko1vT{mDysDBdD?re#Pn01KK&zQDK~VaNfALJ`g09gP_$;bXasJ z^dAT;>}1!1)&OrxXJ9QkkV3{??DlVgjiJuTEal=`w`T=|)<>4O?Z?XrVrOB_g;1SJ zaLwI$(kGSEVKkwf>*{#08+t`)X>pQH?Uyy~AjAbKFS||Ddg~pm0YlZ?eqpZ&Y0PSM zwWQI#?lLZkSu~a8^wKyFyP+iE3|Q29=W}<4`l(kr*44TYbW05xej2-lzM4#xXlEsIk?rzTxlc6 z>z?$(^|xWLtDb@@^waSj>Kq$E>0q(-pu-LQR(R~w3Bd8D4HDbwQ(5)B&F5*5QjX3} z+6jjHlheDUK-Xwcob?*mFwmBRe^6piua(|hy^i={)@xsvgL37;z^v}tqUfsITX3yLSEAIP8F@J&Pw>Ix?p-_ zd7OFg(OjPMR3R1gJw77+gyWJEl2E+eeh15cfNh`vK+OF}&b=eODDZ$1tJ&aw4}l<1PEelH7NI|pEt4(<6E2AVoq z-7l}b?CtI*61Lf8?Gv+z#%T}vBeJvP=NABw-ey8n>{GQG=Gx;&O|(WIo>F)W(558b zUOD$Xg^3a?jj|^a_BO7}Syn6Vc~%HIQ~}53{bfZ%c(^8^C=M&$6TWt%b~;pOH2DLr zbq9B|#*Y>ci$1|<$l#7}mKeRb3J}E;@X@m*PiMCT%M8I9gXL(xF1xp8X81F4v?BcZ z{wh@Y4TbgJCqTPi$BaZ@U6WGSZTQXB`CC)(?;s8IB2DDiF9&sg6?#AKO@G_^gASoy zn$#wSM+1@#LxQ(;3ba>Id#nqJH!m@I0>%E^`)ZtmkDw z`<;b}3hZhRGK6YP^_F>%5E~VdOtH6;=0sJHS2X8g>@4aJog&|948m)d_G3lhh8S?* zUu~;Q6`8)~Y^J+-X^;8L5rQmT22Uzu-u!!X}CK}d0j#WYEc(FkBk zt9Ll05zE{1yqhJy+t#ikWQbaH4#R%3Xii)8(KcNu=*=>Z6Gax0FuBza!CC@{N3*1A zP+KXBi3{Ua_GJIUwMe?jXLQ%KS`QzMwi}7wiX- z*{T3$6Cr*FUaxj#5;~&ATang!BfGjXK)zj5Lo;`%8)C#$K1raG?E+m{HuJE$T5hUg zl~H8sdN>#8K#r$KicmC|LJTk=3ra+5B)@}JXXTTlo%G0D3+4<<&q$RpTadC8Rn^6E ziFod!nsY679;OA_R&U(jt0gA-=+H5_fswN%+Ao{%sq&OfdxLAbfSdGLxkS{{l4J;xZ%r+6k~zjfCK8!E!r^*XuL|55@n#t;4Ot>AX2qubR+767}GF))!rL1h%>F@EY}E0)LSc4YKA*E7)QR=AVv6T z`4&9diI>o5w?slY><37-b~I871N7Ug$3f`}Rowf?5kKE_vh}jkRv^n0Ubl6_GSMnK zh!QkS*Qkg9r5Z;pbnPRM_G8g;=QJU8CyNY~qVj{NyhbUwRL*e^waa)4U%jwC$C3(eDCiQb55K 
zlblY>H(0-?QM6Ga=SScU6>+6!NtqYcuZ!48`&fg(r^2;80!i^sYn#6HTJtdtYYi|n;rcwNmwK>X>kZAq> zfl;TD@~(R#Hu)e39SgIq?jeMaogf{Fw zMG z{ZwPHEiZX&mmePaXT=g?D3?&O1aqu6f<=GtE;7TWWT3|M*vjw4Fh~(cS>EZ|(QjWA z!n$|tEYG6<0c>xC>{IDuzs^QCUPl%D-gE$Fj^w2_paGtvp zZ@Oy9;@1H?vlrW)tGo@#F4?UfIDxb_UCE4?fgF0jL^tkoCO8tC*;*<36VqRt5drbI zTp&S!ffJ+y%Bfp~p>eaqxN&(V^V|zwr0+}K>8H65qppG94J0TItssUr>+%8-*>eI1 zHJdKX!;-~HYGrT$Ba7urX^G4eAk90?j%8kUgS1vTEq=dVJXcL@ zyD;n|23@!t$O*dUO%ixSMrhb0!u9$&sjtI?Ylw-=sr(YpMz-ulo_4Y&KT@Q@8(Pv* znGFUYQB}+VWD&TBp}u;emhSmaV{`Ps7@y>81ZUYEIYnOFSyKj8clMT)LE7ICsMja2 zZLtP*p66$ikrprdNYlu9L5<}>D+NALmo`>Uq9j(*mT0(^y&M-2>GX=^st-ECb;>%3 zzKP<6u7;VWCV4Dn-*!Q1?nTv7mH2q%TaY`E+h5aw>uN^7E0{L74B5ylMOw4s*lxQap2=XH`U`MeQ;zPs9?50h)oA>E!S)BggaqNC26l6hht57*cv? z@jGnu{sjIkM{M|q++lv+DcQ!cwgOR0;vgd{k$unV|2UpaK(GuEH|4qeup z?JV&*0NBf1OSYA2j!en(u~%F1u_Xv`Et64@rM#~h@I|&A7|#0VBeKiGGX2T`as*(q z8_NK=9nS1+bLApW9vbE_L(uIACJyi@u^%p&oahmiU|LItoI^7|P}BOC8Tk?DJ!p}q zQ}*oll>7{NkS0C-Cj!d@mKzp5m-&tEpS@SCdiD2s zAc7_N2KjazPtK zc$+9!)d}?8nw?!8rWC|3JXM1C?f$`#Tt5p?5Fb$5G>O^?UHuIerqW3Ul9ydjkk$re zj5B9WH*TFU(G|B+f?eH-1wF32f{6vQy^^%&Fme}Sv~#G_8)@C_FLZpQi8fJ)oXzi$ZQ$6}le+a&NT6tw1`&_qcF>~hU(Q!? z-=|B0WC%x*0<0RXfvUc$G?SAP?(>X7pmewOeF8{{b?v% zMcyeJ)a}4o=W4In^4ywjjUEA+voRs@seI5~1+IBR-{-&tW>8nV0RTuh-zv;2tW) zF9ci_1prOq+kLF^4=^(J#8rhvRFqM?pmPq8gI%}6^8rmpJ&zCO$CSe%ekI$<$DBH) z)@`CLfKnlfJ#xfVU`K5V=e`p#R)C_Q4ak$>-%W31sl7BIf`J(A_0S4PZ@bkN;sMac z2mt#OgV;n8dGQ_*%~Im`-{YuCx&5Ib=%eC9>~RPW(B?V1qvgD82ru@&^kI=9dXnrfP|CAvLs6I}(I zBJNxbDuW2^u}bF^U_zeC8tOZIy8>SZux|p;qXt=R<&L<#`|O?gAjaVqLLl8{kj+y~{sZh-`PK*Fbgj zdB4)|je9KTjiApci()}GXblP$_9L7q96M-i4U=n(@c|;to3dOuXH4|n=#*fWA&FW? 
z|Hmi*-dBp6za$`s+yMYGuGqqEpz_rE4roU+0`Q;}P@=f4Msbb(9RzAF(TWdx1#}O9 z7M6zqd$R^BiDLyU191LYB-{3=d4-=evv(iR!>?SuVgtG%b8&TNDlqda-mP1T2c4SX zht@P1SE|m5ge|q=y3&3y4lMvEul3e^#tq~*(JQjzgyGw-_fSK;Moqvx4|un~9AA=M zf5u>&4{q9~CQ|Izgx;8^ef z|2VqkM#_z>%$tl7*?Z+SqO3|}Rb)g)B(hh?j*=~jl(JX$R>{iVBYQ{o_&?vBbH3l- z=X1XQ>vEmzbUK}L-|zQpJfDyC3=4K#L;SUZW5U~G@Kv3%t-dG{7S&9ff3>3c>Up@H zj5SOpSO!$P?R#2JmiCR-B`9GRlsV#BKRoaJ68ClTyhTs`8XT5q2i@Y!kylcpSNi^3 zuHE=0(VTm06Ak*m?;-8yXZXSMhfe0kY-b&klS0Za%qDgw4x%%5^17?4&j0MOHa(!XCh73K zasT&<9>m{y4!i3@!0(_V2%DTb6lqTPX$*c*0VvhtHP=<|T(ekPIEMWTN((G3|Iq@F zlnC4NWu^BYAhFeFSYG${0m|Soz7Ad2v{}yPLT_7vqs`XxaIwVOT}L4PO1(+Ge7zdb zJ!;w%854r2o**$83H9oSKu_LHk!E;rCu>_Q~mxZ?|M=iZbc_j$7eGWADE# z8yR(IlOFbZW_mTh=Z1t4rN|>gU?3h(9o}QwEVOW{RGs|bSb6-Qz`Fn8hvm%98n9nq zxsh=CXIrdblVR{Csnr*MQ;u@uJk&YJ4G45e5CaL{4Lz-eFlEvHcLrrWioFs`=en5^YM6$Mj(R@*I_$7= zTo@fk3ZR>+!!bW(e>JlqLNMOwePDPE#@@xwp8K#e$oP0ji{)MKi76!0+U(liqrqzx z)6Ro)144U7Q!P{3)!PWL=xv`WVn2aLZ` z6He*GBtpZe-OrF+tp@;uVF|PKd$*K@9|B}gY&KdJ%HLDJc8yTIR_(r2*!$UB!~J?x z@F&G&{)3xcR>Yeo5CpOuOERrVADtR>eV2#J?WX6`Q_M_sZm9>t5prt0v*OvH8ax-% zDB{AAGBW`@ND;(cvW&l=texrijUBL^IqHmM z=8c~V{#2a}+J;EO!sri|ZWHaKlfoZ7eZ}Z61zjsLq1%<@Dp-x##LYUW*d*b2pRNJW2l)V}mpq)Mm&ySi zcMG9Ph7Wo|9{L}g8)mW=(8T*Vqupi!+jj}{4Ph;TNS@Ol!7AJ?y@cfwPj^$CTE%Bd z#o7Z-+Zy zZS%T>`&jlVU2hc0y(#*GA|_z6k=|#yw6hTSoK}9(A0h72`X18G()#qesp*xL(`Coi zX8VJCwM(go1<7=P)m1hP!e7deKegK;WHZz^%t`iC7wU&Oy=5Rg&gj0W;k^GusUYMR z)FTTr%t9y%;_Dx8b=y=OIBhX--iHYu%5*gC%=^1_7LYuJxShg zEpyTgudjpzM1neo$h-a=A+5NXbDx`ZhGwauHK!ZQ%@%ii+yeq z&c;=IT-BYqv7ISr)I@f?`;eAIyHKZMH^aEwn9edi&z58XHbYymS&r3o)ur1n8e_t( zyShC6hvV=stlz%o@(ZrBeyBNYk$kqd?Kdt|i@tb&GiREJ^&PHsH_N~zyJGX<-t4Ow zgNH$x^VvqSb7@fq$&VOrTxfsve)PRv3O?1XPPr}3w$I1UG(%2Ds7<2_meYR{(fEpv zIM*2_U$SDn`TAjMx@+3ZHTlxxto8SQ^*rLXj>&v^r(K3VI;Dn`r02#G>qv|HRa~e0 z^}gX$#o$*n?GB&r6UAM~POrh-IxfGZgI{|5^lWdcQLvBG4VLyrw_zH0nRp?~2@Y>S zgnHeJ95o8|y^gC}$f+Vw>=-)H_}h64vvr8+S>Eh54|*zx#V6}(--=4g#)Xn6&2UW; z?5aII)a-mG>rVX3|K*x~HAsO{UZJi(zwGJVYs~(alHw@5V%@7M^YCc9jFpZ4ou9?4 
zpuAf`*Qwqnc2c|uiS|n`x^(HS;&rOHOG?+C3khGx;m)O9QYkxf_t^EUteSM}-STLv z*xGZiU=ClIbZrt(UXy|xYv7WRCg;Qm z=?acIs&nv6M*|M@xvArmbUxfx8>Wu92pr2>b&|kB!t&I@;+{Cr#hl#>s3C9CPJi$aTR9f$W z2tX29n6pjU!%nr(bo6IbDVKAQB=Hb*oeQXt?ZI804D^a3=;)Mjz>Dk(!Cynd5+9{X zMJ}1r%2g}LZAFfSkk!Pjhf4fjohnzW4>BPXZB*WMnr`c}0E7|AY&g^-50|@!Q!QGe1@utLiq&}*)Gxj+k^kCmjczc-&&ph7ZUz$ zg_eY!^F~Yd3@eSvg~VH{@|^g_F$r4$E?m8NCgIUzY@SI!^mJ3&PJ+i%6EKeaekRy& zbMfzfL-37+{b{Y|=)1;jiL-Z71J~oY|wrrwn@^JtsWTisE->3EZW z%J@$?cIm-~N#khwXj!WdrHQ12lwZOGiIdN0uRewh{(DiYdQ24d({9-Fc(I%ozJ>r=EeUsm2gJJ^>S9MsvqCy8M(&Rkn2KS6g5$wKzOm-4N^5Y!nSN=kM-8g7K(E`WLu1Xh4Q}g2U_{h6A z>eusWK4NB=+&8pnO4a>-!62SFoE(85xq#lW8Ce2>j<12gaSUvm*j?>#hSxOSXS9U+ zV!~f>J`(W}cMEK}Hyot06{RlmMREVRP5eL)PKs@sLMwDu?em6((uO4WEFDcPJHb7k zb)RAyjf&MPa*Va-IyTTzxH!t#R=1!u&Ra6Ezdigc8BXZRDzM32F*6Pk!8F^G$dUWw z^MLqyZkyu9)nqX$7AHaKFuINfTFe$M*2J1w{X27lpo!cf0;N%wV6whg%RN12Yt(LL z)aA>04eu_$R;Q`fYDcGsBmHAzPiyqQOIfH_gch5f2EDZCqJoI#!iy79=1izqdV4WjO~KqD3_X&OfESQhDpwd z&8=5xuI#POSZdy$Nl1EJ_~+E?oVrDK{k=Fi0=I%Qw7R$jY=*F0r zC&WP!b8C7|q~3CVFR#)!@bXUytoS9qON^1ZxB#{%d(Epj6Am&FAFR(+!0Vo?8lR%U z#s@hn%__D3L&306BEhG0oe7tL;Dv2Iz5w=*QV72-OCrRqeot0OYTIHxI_Af%i#`q>c{Sw zoI&lIo~TI_ZvY;i9JsW1!Qk5&uUm?jE4KLhV7j0D}0 zkd4cmgqZAOlgiMx;bjEg)-g%C_9kieNdj&2ve#{msu2u|KR$Z3<^<)%+F1Wmmax6+ zYhpS=7VXuERpXN{EqGU1@{$h`kgS)Z|LiM1r)JgwF%PlVt75eM8tgJ!2i9$3zIx)X zdco_nlxH}m!bXX&fu~HN-LWy_R9=MBPz+aT z+hy<7tbZcY7PyOUM3w0($NilUdfQQ%i}hUvwzI1(D8neZj&&>ie62Wxks zCs{7oml|LQd~2Wafc94+p=4?$W}Pm>Hb7#w%SeOcY~|zhvA$}lB#i$sBD`Bkw7ODY zPbqyeJl^K>7cg85J(O}YLyaCnM(bi~Bll@9e&zddbxc?Nn$Tk#MBhw9R#-P#7x0zp zWmCngh{>}m>d<%HSNF$m1^FJId3#hLPyeKIDxa~6nLN>5!nxkP(~8Z^b#e=DZk(I3 zx>VzOsVH^;Gc}jIX1Bj{Xp)d%PMg*IOOD<%LE>V9a-e{@tjzht{P29@a*Jc;F)8b~ z)Y5id6Hra=Q%}(%x|v?lAz!6IRN(X9UJibpcphgGq-l?t+_^$KMda4wZ5kF^#J!7Z z_VUJSE0j*U?8mn|bE)w=IiIMFMCU%>ydNYfvW-f@@tCnpx?$Eha94@ zGj7BkVKnWN2V0_z;}5(-n&_B}I&3N_h7$;FbG8M{qz=slc-}WYksfdwYX4=z$HUY_ z8ENw13{{RV=M4b@@xg%NN#)=piuEN$$`uzrI;~`fXV+zdsE_-s>=nJKcq;nVmajww z<9H=-x}N-7uD7Ge{_W7BF^rAutmgP0Y0)7 
z`sFnxL8@lnwZ&1h`IqANEkcXj-yhhS$PK?9n^1P{sFU3qDKR)ECNge?FC0-~U#<3> zdE(D0{J+<&cj{Fye@p88QkwNz{NOE~uj{hgeN~O_vcUsGzT0tyK}>9R?aEoMFIJ_W zS5>V|$Vt^m@R;VS*AA%JF!a+I3v@)*V{o(YsX#e zV;yl9xK=h)AxHkACAmsP^98C$ugd}Sbs~r_s%C<1LO5N z@*T`Y%D11|!YiL!9t%Lhm22F(#pff|d-c=VE-a@{;)-ddc~IcfPz|l*D|`OAWS4k9 zqhkr4OE-ELB(OnfYPiD*dAAv{{6h_P*;*$+7K>jIV8uH|vufTLw)5T1JGK1vyPGB3 zts;`tF?*Ev4xM?zID-_ILPGPaV+$EsZ?PWh`hF(xl(+b=Xz#LG;oT#j zjhFetBkzI^8I@)@vvOTQ2hT$?hx&V|61BIT#39b1YfmvAZ$_$BsHXFDhZzs1(ju9N zPF!-{+-_}Vn$4NA`L@I4K?`97-$;tZ;^3-iL`Txz);mF5D28X2o6(wY|FdTp-h9K? zF$M}n)pe7=GYu9dVH_TNYs!Os&F5a)wbJm(PD}(XkBci&E+2@c{;rF`7}*5XyB^65 zI`~pEUE{I+ZHIeOUTXKF(d9Ig?Y4`vJZlfX#-?*FV;J*V80-2vwM?7ujHR6x>wvir z^W_?GrNO6_cg2Oa*N20frB3lWZ!6;ST?{z-A>u%0=Dr$y%=+8T5{>XaHrh7IW&KT` zZJmeAbKk91-jHe3B%6#I2Ywafka$e$1$9)S)xMb9JXfa<;oh2T!S?t(<3pd|b8fFE zu8h%`?WfYnmeYFL)jTWZm(iw;?7YnJA9n;+vw*n3s<(BRltTDvN<#d*8H+6{`HUHh z+R`fJlOv>0SA^?U_&JizMBTD!-I&~3+{0)-AGn*eb4w4HEPpATAtfE^z}XzpM@EUU z*Hbi^EPZ=byOmty+F=on3Bh0M1fwTrkUWlogv!`GY4V;EbmRI`3Ng0SQ5PzZ{}D z(aD0FXYXTbE-?7tD##X7v~|LjF~MNP<@~vY`!p4Lhqg*?Pu-|;j@;}kekbb};i}oI z*^Eej*P7ybl(oID=}S?bz<)tChjTZ%<6%H)$F%ypN$^ff4Zo%W;*xwEhFa5}pXn{Z z#FW}hENW>!y&dYsWN^eASV>~SOUQ3is3%06LKhL1w1|Pq&aFf0kWTiz&86wq#0Bli z6EJ)yRCM6GTzT(-Yfb#~Rjak6b)(hB`r@c7WkYCZjNr&si|y&=Ejrtlj*;LZc=Q$J7c{{F(C$)B9TYM+cBYt_+W405Mq48V$MM15{ zw7-sBcs2X<7XJ~^E;CxJh$Stc_mibIM+?t#0xkn)kPJbUGL+}6hUmT!}2&km(bug_qv zr?cUg?9lor(j}D?-!fU_*aB>kQKFwJFJ{)7bXV{W+)PXrmgK9=S8o{5wC6|t#IPyR zTCV7k{u~Hv9)>7eKg;=G(l^fQrr*P(S8x^UhKd|`Jy+}lCwHqq-%h7*b;X&kAMjB^ zr*1@GaV7ejCcE1vlR~J$pX#24lsJN(M4574<;(v*us?4E90?{$D?7k$9MkraOq~;zRK{_{?0o1kz77>|)Vh z%#;)F>p4-%`PsIoc$1p3==)zmJ)^+ao)~qW3`GnN{U9(LdK7%+HJGFd!LdO7HuF9& zR&x;@@qpIc;Hz4_V9;vAa#y>b;{Vy$$f17u%4Hi>Si|%p-gCvE$lvKm8-NQrP{FH> z;odHJ;qeo+EINAX)xoP>O|3tvCiKGe>NF_5A=RWG<`*tLQO-deKa`Woq4f8V=?OpH zb$vt!%6X8F2@dC)D8p!g+e6drwKGHy3RwxT5o;)f1;4rV5m;P0SsgOXf3;gIl4xWy z8ANKg^+V{ZHQ#s2!3Bpv>5dBU%={^{)<#`S;gv7L#P}$g%hH_r^x$Gs2gJ4>?H+- zj4@p4D8Nq4KS9AMjXng1{Q96*$VPxA 
z_pSa@i>g5SuT@6E!)Idm#PM;?dGyvWMHdWfQB(OHtBNcV-pJI!f{knGS8^f5wGBw< zv)5t4=&9cZe@Z^cS(=`nprd%qK{1}cFK4w4zx4$}D`*x{0sb7`sZ*Yy?9s4MesK0b zFOff3B0qWvB2$%2m*=}bobFD~$Dv5Ps{t!t0yg&Ip<44f)EMDV1IQV(z|>F#?g@y) zaKztApZoJytNWw{{0cH>^>AMcaX7yLprda@bjEb!}ew( z0AqUL`RwveaOKS$e$$t54NFuy_utPaR@KX2bY2@sr_opmT&NoA85N+sp2LtiFqX%N zL|Kmy=^j~jrUex~80ccDUmk>u#}$R#g6TOLT7Iga$WwjCxi!zjmSmmIxBR6;^9GKnAR~$ z?^K)@OE3}%{*u@5KEw-LzJEVD^cAdg#1?7fx+MH26a*GnZj5Z^$pWpfC`b;s0NB0| zY`w#((Oi)aB?q+r-bW-+cvbT4ua2K$Vkp)RTJ?9i-W3DeaR9x)@k7H21UGshtN92c zC_xesUJ$Ct`0qoBii`l{OjU>lAaEi&$O}y+qq)|C~s{okPl1>^@NTmV?&$R}UYjQ2)KmQt;4=5Ma#P=TIMr zvDc458{@5Zr@?15qv!*$Bf*?P)WBKx+kYL*$S+y=P{42A17&sr?4qA<>6XsJ!({gc zp0$)G00ygE;db8vJ(0nD9|*f?A2m0HGq0phT-M_|?*{PQ5qJu%VfZocKK?u9zn89x zH{RSVW?VUpf$p(w&?v#~Iqxa9HAa*ih@q-qoH+E>zmF;tGy8KLB#;zCuJsLOqOsL^Mt!jm;N%4bLU0!Fn z|MhM<6}?oPRhicZS)nJ@=*4JP!Nc%Z$7@SNq(WdSFypJ((0;v@ee&P8$%E3jCjV#w zo>Aj#Yf@Y;1JOpRF#mTYlt46|9w#vE)}U^7s0_6OlG6rr0Q_fULEK6WIW!e4yg*1r2?a#bGCwsPZKZmFClu`^~p}uE>6s51+WnI9n zfIjJ8k47$?k`}0JLzk%+Z~<=|d%Jd;jsTfE4A#eU#@O*X&A?~Im2Rbs0MYz$--198 z5s04`SQN0w2Cka20`Dm+VDC)-&mn|6>xI^sPJm>u#ai*@ab(u{lZFRn@PJ6bDPTlx0kD`=6~FEFCqt0S=U6pfL1c6+Ni&Ayi-V zgaq}3RlaYY1h9ch;mpsLOE~w-v*7+e+x!qdiD#FVa_K1g54cH-A@{zkF!uh#r#w)& z^}{Hbkd1r))m;B|Zjs_^>rxUQ!3&-GX9K}W2Gcgn9wmZofxYzRvS=A7(+r>OB0?)r z7TiJnJFYz6jQ^PJgZ+;7R;t#E?-c&?T?tk2Fo%B+8fscn#Jb@#p@c8M&eTJ#n*Iqp z0(!9+w3N5LJU=UFz*i4+^_=ApcyTxUUj?Skx9(4Vm^(=*(GIeL_xHO-JtS`)1V&02C`AWRcPSYgeOKk#2a5dyG)5#id`qL%{lOI!q{ zY^!iv^n?1%3Nd|yFJRhchN_qY>j4Ux9&nwxLK0ZwB-uSKQy0#Qo`-ITtNS!@^5f|H z@_&wv6mnv6;?XQUL052OPi&sURFa>1_8IOH6+Uk;G>*U?;HM#!1xd0Mr0P5&N*nq2 z7R!sn!%A_#zZ`kt+Zk^-Px_JBS2&5ff}7b!$YneAQESPNotXT^ArjsHB+LI^ni>*= zvIH-z7;%}%?9Q@*l_{YcY?3)oHQDzf(Z=kjK5+c0CKQwKB3D5ymjnN%gZ&JA!scX> zxDeiNOOz9@;gw7FLFDzm#A;?#UcvdS^x?XuSu2 zeu;W8llZt*ziyzc83Q`&gviEnsD1opia{jU52e?%-*$}AlP}_7h*8vECM>m+HF*dA z`8kTpcMO`}6_AJcjM!V^(LAju%A*mfYJd8*3&BHbK?+LA$q+%LtXY=z{J&Pc%8NfC zD@ak@k(<}`NAdb;897uRAdwzd6D7Kd6HRkme|O&`y|CmwFStFLgcva-nW6Hwqi$((Dbxe|H9^Y)G-#2OaSWsvCq+iirfI 
zwonJ8B3Ev|SVLM@iS#HGCmydQvs3Gu#<`3yqeQ*8yWse{(6^S_HAUfuMH7M3tP z#O)+Gg7RPhC7~0%Mrz_le_`UzUADXXaB}%~zum?B#hH0aL8rnQ`~zIh6Gdk+GHvg= zq2G}!BHxBj{1H^NPVj*B;S^YcYqjm=YpaBRFC%{yuf0Y%VLyNVfrN*-GJJ;_$Awli z_iFVC*yyNf?XrQS5h5~!~lT;jsI;f5>bIPzndA1 zwz&Fa;MRN6E>r&T0QYz!)b_ibD>1N1WZ{A&O!g+ti%%PD?DxqFFg5s@Fy>$n(F&Xup7tZ; z<|Hzs6yw3`dMTwybtSinc?WuB(9(Did+xz4P-*f31HSl^8!G0X zSNOTAjqMvwBXgbmA$BQ(!Ur+rAyFNO0G-jXU0%O{A6!ohFieUvdZlFYq{3~bS@jrJ zYV2pknevH4tO(4PpWhvTjd`e>M59<5Dx4nhZO+j8m>fHaulQvy97(`5b=YHJ|1=5u>W{n{;FkHosjtmMi{s7#uzxIZT51g)n zt0Sy}J-ig-QC^u>kpt}{zDHkF?aA;t+;53zty#j9|Gfany>(IK-s+M|Li+G^Jw?PP z0@)T}(}fdy;vO>q8x!VI)*=20Dp+f;MP`P zlz+kXc=-o}L@*Xgqwe{6rj}zCE5K;3!7IRchn~MYg$xj9%8S|&ATs#QOXluD7hKv! zm!TssNvU=~;?n4-!@;I3N-)Fq{#AQ58tZRL* zk?Z$vp>VUbC?&73d+0i-vhzU7EA^i17F9DgZxji|gL$Z&C?wMqMvGV>a@7y+jNh8& zw(2Q0`k<{D%R@0dnSFhPxk|&MdmFlRTXjlAU6bfM5u{UWW4Lt_I@N@&TI@cItFs1? zwvq_J{qAc4jF`Bt)e*5GvGF4q>{9fNx2$B=bpG*l!>h7vV-3kdt6nF|*&q)kyHNAM zE$H^+zqJ;_$ECB}=leXHnqWBMzGWzh=sTfz))(?^*7$sc)VDCK&+yzn^7Lg5S$iha zF5am{NrKMeXkF&f0@z6JAimvJS5u}};6Ydd{oGMMlQ)&}F#)Po8Fg|n7O81RZoAw` zurS6sw+4VsU%VsT(mnYRXDg}pk4diDTh}2zq*EgO%ECS(J%+Wbi*-w9qzzU6Q@F73 z0*sMBXU`D|ZSEs1(hi;{NDGyO8{WW>S9{>U5j133)8K2rW8*o=Aa*f*dpW3HO|eY` z?H%|IzL!JCMa7Ol31w^LMUMGB&>;#6pfuy%2OkuNO5DTUO`r+1Is0fEilOn&{dEaF zqH&N+)(!VM#X8#9(&AdYj`YC{dT4S>h6 zgLVUG*Oxzq!^AlWEurkj)bsb2Emn{zc_8#D5x-U?GmDm!^*r3kID>qMh0kwo_5oBJ znuLpI8n>DZVNvSj%C|Y`nPv#F6UZ*RNr1;mh#mh@c@eIH3+Zwk0Z_h(_b{;aNR9nm zs9~#)s$}eNfR&$P5-#0Ce$XG@cB~a&{!I&>gSQ?~F%O`fwn+*JoqVM}2$JZ8V@j(A zsx!_MzolXND@=)e1vOb+ZqsFK>~n&kfeR!n4l)o+Gk#<*#ZBVy<8N8QGhX}zwJ+=& zFaxPEF%7q!>Uz=wDPOhFWB1fA$ZF?-n6(oa zrra2nJM)*Q6(x6P-xwBoQJhs3!#IIep$L2(1K{bRfAl%9E^r#BdkS(X#cT6bg6uxS z-5c{FEO0l4wU+H-T?E<&<%+FD@@4Dqr#51Og52c zSbI8uDS+8y7UkMlVfuxFMm(7rnJ^{JNqDI9$zTpUnc2kT_;vNsPkSRdDUOFExr`l& z%}3B4mO7BFB?&{?&2}vyJjD&GfztuZdm^?Y`?A6+m>qFu@uoYM>m_5+D!t*6?~`W$ zqHvho09&dlGMH;=99`o&Ld&*DK59=_k~10^zFZ!Gfhk0a7y;m 
znT|X<({FCStP;e0SWL&>VF8ClZewq2RylU5DYNaX3?K^d8AjYSaPQ~xYUC$ti`ffnV+@=KMH5UD4R=H0{GaG8lmRaaMK2Y0t> z_jcS5HV+S04|4~|Y-v97jhdA#jV*NS!Uc20q-DT!j%WsBeg4+yc<=M!k*(7Yj&vXF z`qDjc3mRd&aE|+yiC)%5*_}`02PR9(1!v~mNxzNJ85EEv?Txp|dz3fTW+o@w^6e*= z&P-Q05AGe_Eu?Xit|}U*{pFHUv6{?mwffs4+rR0Bh3hBdgliqUElXCpjg%i5J%>ZO ziB_7PWaqTn|BjMHAx%jPg|3bpF-x;I6!aojJ4L<@@bKp(`$xD|bRlmP4s_ zaMJ;yvl~!obrC0_n@0jUR=|)u(9E}T{zu`N?wY9(MIt{#t-swkh6Q-B*6}b z@DLy~nOk{ix#vy93rq)xwCVU}n8R2p#;*Zr^4BiwRK&wDnK$6hhM%p?SU_hQ!991JGT0ihMyph_UmG4ajfuSv`emiesV(a0ZcY5AZ2N*e;WIhS$eSwvtXt!KHruVI zdu+niTb)AofWWsxtvYdQpu9SN`uV`#^*tmVdsC{e6zpb!bX3M=^BH4bv z+Vb1hWHP7g;mlM*@bCW1U1kD{CJ!hb4JPR83%R=&E~VPfAGC0Ii9M?hEP2%;K4q>D z{ToR$d;BoXVx{L}e6URVCwnbH8Bw?J0LI0@OJN*d+I$Ruic&SU@D3$FC5r=x($!s| zG#619|Z_cauB*)InNCYlTQ251@0jFfPtG|a929Ajb*1^Q!%6aVA zWpmZCw4h{u6Q!2TiYWY!#WWMnC8?7q-C{hT=4-{rVU zBWiwpVamo1Py&;CFP^Pl(ZM-;L?L}_ziWu_L->bOXibICDyZ$dhWEicCF-wJ5RBAA zrYF6T&KMFM1Zc!@RD=_k#2yOnOF1YRZ?h5eKABds`xbui>Oq#1m`zO!D(OnrQXOV( zZ9u(C$!>JW`pkzmiwCF~ni0N^^&xalCesh+{parCZF)kSFL0CLB1iik7fx$Q4T#gO z6xJRI(HMm1GTgiyRJ=;&$#Gy+vsIg1xGGX3;uFR*zQ|sgxGK!{DwBQND2KWAT2=GX z-umFvLDDeJeX*YPu@c_{0W$_wJ3RCj&610dNICV8NRNm|+1MWXNtdG1h9*ar&)%Zx z!*5*_${UK;m2rx%u11F?IrukI<2s!i-OoO%-qA3eOf^H7*A&d-u0_3M;7kHH*ynf4 zDVRGa&F>pU=_FAN0bo!{nHIFFl#<>k)$u!UJ}q|ZDfxHX0bQ-0z{7Bt~-4h)y~uA;;+&4EccM@p^MT`!vXC~>D2d`#TN5_(9i zes^FbK|igWlT0dP1VS@!awZYps(*-cnD2{ve+WR069}g5-RsHA!POb7$u;A~@hon3 z19&s>+{7yciyHM6#UO$&bzHLc&1t>MKO?P9QBK{GdOAU}DSmC7R$fRdjH4y=H>IBo z-!IO(4=F`=4qJj8xZ}yp$*{XS9`&(NCQ8&k6Q}18AB{M6)3CJ?EP5p)APqQ1=gTv(XB(l(12`N1~l8ASWB?L z`b}66@JEisi`o_;7J#?e9C-|wsLAHFR3|CYRMToUN&GMZT)6{-u?R+=1EIy689|(0Nkg9emP6IUXVdCOmbOFln9$PNf(BO z`uRaC2LfrI4<;89)AAk381V!oaS2S*PV34RoF;gaKKI(`d-bEtQVX|dEBJ9a3xUQR8c-O3lc$W2jM!eIaX+8@~IIKBtp8l=LN zmry=O8w-UMn;}dlJHB?l){jMqtt+Hd7prlp{6kr=#1oTG)jbO3VyEW6_dk8DK82jA zf}iq`TuEdQ9x6~~X{4fz((cW@6R)qIm|LKvyTnUC&t2<#*D>`VZ^~Up()8FnurI5v zg+<;V2sqZ6VS;mV@LUQFTtN_?x4xY3XbD-|b-2Ov;1x)0LbtIghZR7g;ODh`#y!``Cvpdn?V1QxFt31& 
zo=G{bJxYLy^O*6&fM!SSDF+!4ZL$QM#_mIszh=8CX4k~g63fk)PPM!hv!ZIL75d{T%#1E z2(L`)3Pz)iH^M5+*nx8mz#IA5Wl~qhQ1dL>@lQ|KY(@Y9r@fyN_0dSwWxmTf`m(>5 zKNT7uccET9)ha0D1-u0Er{bKtVr+_H*o&#WN>6?v)06`ja8L0y7U2~0! zp63yH0!_1_`c*5x+nv5MJ$4WKAk8z%DQxYYXzjhTfpV7$t~3x6U=peJ2rb2E>}*&V zfCwAx<`3XpZyLwTAgs4XJ<)Dp%c1;j98m;bQqKCQ_sO>j^U!1D@HA^*}Y?Q6n23sRKv!a4lMcd2$wom?M>LNI52Sb1OEC1t*W6?eF-Y>!dFY*jJwcwH5UZfM4Ag@+nTyd28k@+3rv(^yy(mRVo zZi$t;sd-Tz7xNDRg}bdrAU2LIz&cN*Z{~|p8xnL0C@-3JgcJ<+5z-ip)wdga>@)yi zKB1k)IXMo1TET zXf*{Z;P;<7SZ9>tj!KBj@ zLTBe})^7eoQDAeZRL(uh{&%6a5;4PuQ{|J^a(lLai zwvCWUCx%KNojEWv%&$LiBQ*kxCdAr8SrorMo351b=QZWU;-wOt717;*SQAYV4Mh{0 zzk3t{R=H3!EES+CXEBv>X!DzIpdz2g1SvxAs(3jm6; zjR_#bJwrksG$~PMnf|5L0se{FuwrzlQ`p2&A|l{O+}B6}v-oIvxsf zho{=?Zd&50aRJEQqB=!@t6c%&OerWCzCfjO!l}5ofY^A+A)Ua-LYNlP2kaQ9$e(Z9 zjUqp-GOCQy0Y7d26JGlAf3_zIXegkWB{4FFk@1B5e1);D1YKoHJm$F}x5m~&u8*gk z&@e5ye9kp>9C|4ZVg0pTbGU>#G#AtJy4e zNhHwm*(z!-0U}b1zKaZ&gisx}$%bzx_xfb-&Ey(QK2n`PJeM}ImB#x=ABzUg&G2Uv zjZWGZY#lhARgKPpL$YaUvUt*Bd#0S)t;He4{qDh87>82%rARN&^fC;)U#B)fZ=HF- zKv+mNsh<7-C7KuKOu9_t@x3^vx7VH7#WK9*JZ{k}eogPE;AESA;|g=rt}*Rk{etf= z0WnvBN()bqQQZ}qu7w2U{i5H=T1J`D%7oJIiT7FJOd)^poh<0KmdAfTm zUdknJ-2Ob$+i>HXbalg7Z*C}Yk6P8VAqFpa22ilZMD0YVhKGsE!+`%i8Ih?F4(c5*8iB)xhOFvt0|}eq0q8a& zz298%P!Tk573!Jm10$ zKI}bBsW;a3AU84mVWdead$O&jO;Bn^_b$uH(0Gvt&$tCR*sv23a+X0kzFwZoqv5?T zA4ii)GF%MTT{@mffn-%Mc#95cID1wlGvCb1@|m!2&*gJg|JgJ-R!Sw6boi(-^oN3- z)VOte@Uv$XM(VeDK4A;o-*ZDX&Cy(vj zSrd;FBo1RkH+R%8j#;^Jh!G%^eXHhZY(-Cya~KPG;)nraF|MH&!n;v&e3s=*!D2;Tiz0oj@bttNQ8W@csHQxcd5yDd;@SCM9}R_(TQw1H zOBSboEjj55h(vY2VHKroiLrQEYyI=4PuVEF1?H&6#{&fs4)IyToZq`Cwrjpe?rB!3 zTYbszx^%U2JFvfb&N2L>1-N%$6|}jw!sd+KAR=tKU1sEbf;6$T|9I~67WxiUUyj!q zDgu)V8V^Q(T7D5OQ)S9RlZxb)g}?=L-JRg6#mM;HEtps`v*_CpETcgc%e3U~XV~Yb zXgqq;D76`y#d%dym&kteP_)EtKVvnsK}a+lM9`sK;k&;4fQZ$~m_=LUqI4 zC4-8lo-`KKWHCv#I_Z}?=SE_lS_zN3j`VXSuHR)4lflF?1I&}L&Xoy1Kj`BDP7GKA zED^!QK3mZK5nz}K;!E)cp;!s$p*M{EeyyYLZ>}~)4h@b!-JA#&_JXW!jPqIp%@WXe z#YjR8+*H3#S3^p}Qi4OPJwZ8c+fDI9BaY=9{#X0)?L3=di9JY$cqaT7K(am%PE4V+ 
zw7cuh4u+^~!|a3-u3<+yf^P1NguC$G9X~%!@%ZciIYB;V6juEeVC(i5n<^QTFPS;8 zQYM!jZCNo!86aPi%VoO=iBL0_U4VKkb3FG?m4wU)|3K^vz$=})(*MBe-WL$d;s8sv z*`)(H#^hQ~x%VomW}uFl%S)ZgfH8>9tv_iiE@r7m}l8}1NXo@WLLH5L#FbBpkF=W}E>j$PmLzxonWCU3Y~b%9ba zc<-Kv&r(aICY5GV59*7hlq>4M-6w~=c_RbgwNv#r=x`9_Odq6Mn50#}7%%HvO>bI} z=K#tea;kR$Vo-@9a@=_~V~I%Q!*t&$`OSw63T z1QMlz4?waDGS+D(|G5+SP)LyX)|*pAY}B#;(KYy)cbI{C5ScDD&aH$>S5LeTx`hm2 zIln3fQTqWt=IR9LMFa3ytSel%cmsL6(vhq84|_KM2<4oY&|Jjc{#K2~6;gi^#P}iq zfkyn*7o||+&W;Sh!i#f$TLV^nGdY}p!JW-l2yjVl$+Dv$`7;3p-euRPf{hB|Q9vGw z@LmjBjdhW+MD&V;DqhUnJoOQI3!n%5y5;CcDCUT0K3E8jSgR+lL3*9RlNPGOyuB(&ojtH8hinVZdVOmp*(e4aHIsp!d{hVr>&7 zrmdCkSC78!g$S9hcj&`RHFMK*zl6yYr zQ=9@*V`IGk^NrlH4IvTBsfE9(WMCDu2Ohp5@@@~%GFxZ zkBIUMB5x+Sd7siE*QACK?_p`5}~x@E)3$5zqJP%j8c`F>xa4}NQkV% z<~^1gCl$H|ha-u$1CzsR0pW^5RiUFJ#h9T{jn1kjhx?b1p*s34qZ#jiJvhEaYC~%s zAX2-hI4CQ{R26P3+T&bzushS0_iOT0-dcFwp8sIDh-aHhbB#+DT)D+TQ274kj&!CG~M1 zW-yg6{fkdk8be&%@#y?bq)5232>S>ywe0V9GANGGBGJizkp=Tg6@+r?q#gX zHsp9UfPyn+ovl#eZ3T|yCJJhW8PqmsvRfy#X`)+_I@F1Hb8|$H{a%zrMFEZy8% z$Yz{ARF{}zmt4ro@8m!6`;W|X=aQF%3h4g|k=Yf7KeTf{8J5upGvm7R15Z=4m7~35 zxI0X9!S2OTy9!9~cx%4f=+4SZ-AkpO$%Gi@$m*g5{r#;=p+`xRvSHR;r=Oaa%eJna zyYZe&mA9keoWe-PMm1!XEFo}QqyV91frj)p@S6Qd4581W>I2YL;Lym(9>t_#h%DhH zw1$wI`m-PuuWprEzoHMxMrFqAY^EG~9D%DujE)g|pES+pMHJjhcD zj!z(ZmzDc-Z|}mLmya-4C#5$u%NFy#8C=h%!TN+($T6#>rPvlV`3$BBa4dm-92!9u zf*nP#cf_bO_lcC%^qVJ$=}7jCx5cyBs+k{kX?^-s&a+NDrs5NY)e41g_1OF5D;4L? 
z4j8D?FJd=VO|YaE3Sayc}SHsJ2CH%J)THitN=8L{v8imWVR3FITQsx{lC|l zAu2Fpp+7$ZbJekqOf=1kcK#-E(*L^R#bw73{O*RDbn5jJ=IqpYeN`|oDO2e6-)sqv z#e_KG?#mVN`Z!?!q~F@-$+e}n0lOX1c{q@{icWA{E-i9ex@Fao>JRGYvTDh64ym)= zfkG7^J0>lceZ)f0kcjvD`mbXgUGV>_#PM!nYyWVQZxA?I1EmBgT-shFsAjO%M!ksG3qzXkMCHrw@D$TR z<)CaaFm%SMAVhLOsKOtew44#KHH_qR8gVgqg+wClPiveQnQUeGJ@_LCQZIlnP{C3Q za?&OpSaH!$RlmBUNb3porYTs2ryxIWin<<5O8Q*K+(AW^fTzJO^*iRTPBsNGzl4w# zj$_8Z*l~Sh^ajwa8wUT)e0_pp^@K5lQ=h!uhUm>7rvCatM;wk07pw&w+!6}Qwx<3} zzzdLmZXat0^<3%0M_s-vqg7RiG;OEbQFV!&yx}oTm^Y&7S9LA~F{@k=r3HY)iS~_j zo6z+Rg3L8(=|{&l+;z|R@#mTHEmnbIrjPWgHU&;b*Q@3H8wC*Ynw?i&DZD^MOe&IUx`8Q{=zuoqG5UgE>TMUg7af1;mj9 z?iLw}FpFjQBXkRrd0l7&xSd7Zw(S6$NdLBx2VL9S9!;jrIFSc&4yLuGQig~kX#fgo z+i05C`+rNOS=Q03?*92yXB*#xD zwWn(1Jb$&lXRA-iXiw+fZU@2d|3}qVhDF)6Yg57?H3QN;h=9`49YctOVA0);G)N;1 z4T1tvQUZc>NjHduv~)?gbbo7}=Y7Auw}-zt5QlrMd#&p_uQL+905c&XRQe0b*%Zw6 z;Q49)RxG^9Qh-CJd;bdb_PS~NFCyhW0_(X6-1G9j`%?Es_$doGls{&)m80Md@DLqn zMO>a0RI~*a`-bxe0PT;^;x(`za!%&ba7A#ALowsmF5_${CXJE29X~R;?LdL$*2|Wc z!%s{HYXUXLKQ^DBZo2Dyu?es(RMqhW1K7**@P6O-ZU*+-ZeUxne79(|?YcGs^h9#UqVusk-~h^YATx$9pUz@M?>x&T#&cBQ2e0GIhRMh-PUx??cON0x$v@w-<&WLYSk zGz1plw^@+)>M)u88d_C!R260wliPyOv2No||)w|+9=u(AS5APkabo1?ugZkskm9}hw+DKK0k&PDj^l&(jezLP zrqUj<4`l3RKuN-zD^U4g%D3r5080UiW*yZcIGHSz+B*Ws(Qg5lPu0qA=S3-2^`trV z>-04+PUP)KXyeN=aQypVWU3_q6R*PNm>Hw@bw(S*!G(ZD(|n?Hy~Bn?N*d)s(r87{O9<$ zvNWX+Wy>~#<-Q!M6-s8er{Wi_?oZRuyu1rf+~g3S2O?CTQaY5|$JWJ0})ThDvJMGni>#MpB zx9ZhTp>`=iDN#Ex`bM543D`!=0Vpi0`Ei`^UeKPP63is0-Y&RF2e z=?`cqoOa94x>2`UEIk%Of2UKX!~uX* zC<<69E`VFBWMzrKCME!!vc0^@X19kz7#3-+AAJDnrn-R z!4;(U*||T*xf5^n^7U8y{=$heImzP6#OAAxPcJ5?%?;i@9e6gv+*YD_o87N@6_-}w z=ltk-mZ{SE8>Y6~i#7U_GX)3DpHu`z7pnC+qZz1AI_Ps#>`W+Jsr@Z*3&CL_7JwQ& zqf(XJ8@oIs_6l662G9WG2XKW4f<8BaGnY59Wq+}K2T)ub;bO*Of@J^QpgML{A2Z;5Ksrq`^csG`LK!Vo9>|F55mDhzKaZp* zV*}9_UVsa0T4ArRnI?Xp$-5h{z0+6&b)|xXMzBekAhz# z68}DiWYqWHqxjQMj+v+y6y-gPSKV$rcL5m2s|cc9LC}M;k^3ZN0D8#%wT=W;7#-s~ z?S1gCoLxgAg2pJ`B)?>RK0dec#(Q+5>1d_fZFt$e3vqBECpann^7dNAb*KYpCoE>Z 
zk{B4Kh`UdXP3_J1c?sHVrJGDNi?!~4I+Pi$c0|*h-%~%O8(J5gA9sq_Nf#^3OViBz zK~`dSv+PdI=lxzqONi^e(yez5{l?MY?5!iUgxQg?UTl&E!!v=kbWV-FUe(n_ODVunM~ zNk>p!NS-!!IPAGl5$YoM0p4#hW26|Ywx+Hz4>p+~T(Phb>w3(owLSgC9JJL$xG@!H?Z}z)QkTh8WePXOW z2Eex>fiCHF(W`Hx!+MVvIaIjj5ONHu4xOxslNosu z;oT55k*5;*sN+nZcKb~kdSg9FuModIU#3Dlkkmii*%#RT&SW3Ae2oL#YH=@FbapI4(zxXITUvVA ze5){gEoiUFHltYIs9COK`jiRds15mRQdFZGz{s*_a!K+FX;cM=(X~3qw{jbHOc@)+ zIMo-YQ*_^}3BpFBx+V0EtHh5tbR!JCM@8jCv)H{|?)BE1?D>6rzUGkl>h>x` z;r=ZB^@5ncP~%38ft?}Zcg%w}$>s%do7+FLs>%}8t2ZZW^L~?+a`aQZb*=9;aK(S#J-w6!E4*+gUwUXQ{Iw-Ic7E!zgvm))_9p>qy1`j+$%RCBweDh zOa^O5RZHAUr;85{iR5YrtJ_bDZ1^9(2sc$d`!#Mm1vPTaBRMVWmQ1%JS9{wdEcc5( znodVf=q0P(xV^)%!|0pxB6BVsLkGXZ-08LSnlF>PD{j#b+nTG79v70s&UGg2EhQ1F zZhjq$3psTvwr$DcvrRUF4&~aiT<$bZ4}U`!+j-XBMy|NTZI=zZy?bg19G*F2-_Oq! z4WZh;ccT8!9FY4()aUbgW!OwGiEp7#(ooG<1e!6)F$NH3Y>_EzCdH$GW2tIlLv?FF z^$fxBdk9$mT-nvclBQ2oQ2hn>sTM6 zlwiHpu`ez+18pctHygzjMJ{Yq0?Xz}`A7;QpqTg!+P{Yi497B7XMQM$+`u$)O%uB{ z1BLY|&@_;>*?7=%v7^jNcLk01eQx#l`G-$YHmN$0$svS|K2TOyZb|-DJ>!Y~EL5wo z^@x>X59N*iNUpS`v}fRS2k1`XD?W#rAQkq#s+q{~{#2 zw<0|0X0vTFr$HE>m3x*;i8Wb{&N1U8}c4% zq@4B`;Va~*2C`+xj#Vt~i%paQ0FHX&)Jd6(u-a?cOZ4C%;N%HnTb{d-9sWn(Y5m}p z^)#`LvSiK1`gtmyq?+HqU#2EfiDPPH}8-YZI99}v?`FXQBgOBUtBYId%LRY`&1`SrA|K$zAE+0-Q?`0 z$-Es9%==AHBDT?C4e#E<-`eOn9^|Aah8_HzN|Y=vzYaZhipJp5-*-5nLwI#~rQUh_ zM|7>*UohQ0`koLyyFpFYNn?GYtL??%E6l}AG-$cZpKhn@#J_!N^5tkU3*NnoEuJd zZ`j*hm$;b9RdM>p9i~so#+spv>k0x6o)X7o4Cs*G)F zQHW$fcpai)!AwQ7mREx4dL!;{sYZTY7OG<6@bG9jaEnY|#)<&fLu!uNpU-HzOJ#;MhxqLl7dS47a5xK;v z|Idq3&KBo`m`70pKn$Dm`0?ZFPIV$$92lKKgNrsu(Vf*Uy49oo-^l6|WT8>Dj?Frh z258lk3_eyfWk!P@>2DAkUI=2llr<$BWr`s87)9l91I(hlVZ?xhu0g>NvVsJIJFfi- z7Ddu;|9yq59;h1i()7tj%R9j&U7+R};$h!H6n~uO+Y)FlX}@NUar$<5!{M~V`5zMR zKXD2(JPaa8IkQcnE-E$OQ-c>%vZf51;dfe5bWSTD0KT}j;j&PGk^cevC`$BF9s~fe zxGIpmt@0h8REztpxKmi+qdvx!{y88u7O7=O41{r|RfEAUDhee+;#9_v4?UGr+!Y4JElVCygQ4%4sb=QyKV#yQs;yrU>$N|_sGK<DOs@pL-+s#J zn6i-Yo(}0xUe(w$c?l1`cWOBRjKmL8UatFP>is^;PgQi` zYU>Vf${V;uZ@<)z35uzZ?!M--6Eqgdx_A^Liuf-VU}}rrX=vpowwsd)fkHA)ci;Jt 
z^X6?A$=ee3`(`((12uJtLc5TI72uv=-GOaeW%S@Sv5<%_zsedywM^8MUk#e4z(8GMwVv%L?2BWXqwqK!{=oP%+_XJOq*-B$A&o-T*PJe3aBbdE289e)jKH zJw!~MYrxJ|uS3A31vsaeE6{o7T<^CNsnRgiG11D0VRiu|Y>=|R0dHDkCxfaT@k$T) z-k_3s*YH#F8-U>OdqTdD7;rAHD$UEe0b5Q3I0tyZPl3Nnq72^Z@aWP^a1z&3`T#)I zs^vQ}O<43+`_DvL9|Mn^vl3*%^_oR2(y`P;f1RO6F}=QlEqjIX7O%Gu2j$B!9$!== zy`;H+zg`(DOO&82qu#YNk-m)}{p`R6o!gS1yr9tg8yZ+Taw+H}pE4|Xemp%o=e+_0 zQN(`xe7cd3x!A5o9zD&Z0FX2+g2WPYzuW2D;sLQ`OHwC>Bp3UPRCDJMgDk}}5^KTo zyy;V=&Gld7eTr#+jp8m&_ETJkRNM^5sGjWnlpjKzk#709xb~NbY~QcnSbwamqaRG~ z^u|JT&%gb4;(riT z{Plx_%E?oL*32er`nxWWag}7=3i!M}JEI;N@`l~uyr80o0 zi^ROYK&e_Ep>GbU%VIf9gON$DD?4i950;C$#TZ9b&hv?a+Wkq8O|sLm2t z^I39uV4>&njnd>IL55&0#5))d>%y|&wwqlTTh#I!|-wJK2*9Ev70aJrU0PQu?-2%)h00=lBYK@ye;#sJz zR-W@9?iAnZj+kyR`Y$uG%{xi_IgR!b}H3Dk0_Fkb)lCO zk%~q*nfme8X${$J6(qAHO7eBaT!(qec z*zU`sA+gY-c#f8vt#-m{KH!#s&-nP>#1;VhguY?Q1_N_|>^NA$WxOVsJpH%XiAR&c zjpIh(!teciI#J_7&6GJj5VHV?%i+6YoeU3*_u?XjrHkbATaP-z0wF^=BpuL@u!juR z6tE^BnA3?IRET~)7-xB9%R}}-yT(54@eGQo00G2Ia5G7qUH}^QjkByjYh*im)k0WT zUj$0l%h}r@YEum@f52qkgYvnM9&0EPYtdw7#uCY&J{PmnIZp_qOW9 zd-`-&pVP||AdK4I-{;y6{N>W^weN63{QXROY3b)}GKo>+(>Qxls?vOGy+-GlM6z1q zX{CFA$@`PuuXV-3`lqBRm&fqS<3UN=uBDjNg~+_7bqdf!4f{Bf-rs~^>NR?6gp%o!KLu$XvG5ZYup-(Rh?Ybwd{^-{Wy_avY&6zuQ zj$_?)POs~hb0UEmxQ5=XW_wTPyQYYB#?hU+QEf&Y{O4hur!D#9?xkYy)x<|%>s!R! zW$C5Vw9T3x3GK(!Q`lmjoOK(C_2THjDn1$idZ- z7miN$L+{BU?}-}h4`1zu2ASe2_h%o(lv|6wLLy2h%99Oz%xwe!e!wJiq3(UVl~i|> zH|EwOJ5m1zgX5&w;&P`h^qSJCSM$5kw6Z1baU)3|;_wVrty)I&9S+MI#;i~1u5hpx%r~Tsk zXQq%ucs|RF0FsxT_f|U1NPPAHpoPwtml@4^*mrudQKxWaUlPH2G>S`EPhJbyUBe=r zO^=a*7AgsELQrS0Hc`9?AVMp$pf@;c#mb5qNvr;I3r;F70AXJx3U9()xKit6{n1LK z&~Zp7?)9e*w><0@wt6c92PX<*xoHv4JFXCRdFoKRh+RVjn?;^&2*9V3B?&P-%7}L& z6FD|uK_Me((Ri(%x?u2I4U8$Mtoq9SaaM5IDDU(I6N^u@z#YS0`eBJx#p+1j? 
zcwp784t(PO`JqekSo?=hJD^6`zuoJ-o8dor6-aW2S@S<31<6nS_jFQ&zju3s3G~uO zTBe}iAhbE<70eg#8{OjeAqNTH*f&EU-qTxy-xD_G?alN2cHev`5^K+$YEbDYs7wziOfSieLE|=Q`e)eP8K<1*`LLN?ujlK@n-5>7?H*@T-M|>dJ~0~m`JO|`(Wj3B zY?zjIPgPSNT>Fe6TOwJ#Z4gi{TG(0;S&8Iadn?07j%Y;I>+4%ZmsR(*?H?ybrZXwG zXR~9OlE#BRX-QFxA@ldbs&5#VUPzhVL^D5C2;SI?`?Sv@Y9uPS`fjDnLU*&aBoZ$no0t{M(3rMOwc@ zo59~jJv{pLp7Gv9-AMQPxkdPTBjZiX+r5`M6i!}e9B&%jk$v%sw*pl+1la2mW(g5q z3cpyYt~t%zN&#qBJ`G=sa7TcuVh{I0e)cwG* zN-68#c?>*1roh>3TwyUdkmO{=fI4wwsWFgEh!m2=MC;i-FtePR^H?T0Wg_Dd77WpR zdLhvuk;ty~H-kPM*$z1ZRtxi45)6WaV9O^)ESkW8qj@lyhej!5paOFN1V8pc{WCsB)i^8>+4lX( z7aX!KU_F+)ZAI)VI`G>1B^a{y%KcqND$FWGb4=py=dU{dVbX*3!uHM@rM$cbjxEhd z*V%_Wh2Ng-=c{r8jzta1hvA_6mW$aW%j5`vh_wfgB`D%r1DAgY{|1;C$Lhcy%|Ihc z%`yRu9dtm$=Q8S*X!;5jdAAK_nC>!6x%bbC7$tfUKp^Hwcu461`1MiBWCav)g!H~= znf0r^cN5VF5dc$h^=rCPT8x%z{Ffk`BZIORaV^;R@tfaczgfN12sNd<)cZ65*8UE3 z!AVTDlTDmd01z{bVaNyU5wC8DlN#Sg)I|ns}99BYG42)ooaQg4ejT)CBkT^gkZR$m~70M|lQNjmh zaDbA3zotOB?RKb>J%aAN=g)crlW~p@PuDILdge6Sm2~=FFNHeDT?a%(4FC}3OdMzr zG(rg^qo2HX1C4g&75;LTN4$ zmYqhDA0Se5Hu`ekeGA5J_V|c+mXz>x>Ijdy%1C-x3f~@L?lE1YgyWYMEe2c26tZ#j zo6MD2Uta;x!|c|_e%fhT6*k6+Fc)G$v3Hyl8i4$?$V~DE~>Nw7yi~k)0SzwO}FkGWU6(3ArqW1QW=LkYw;rDu#gtTv`>H zyk^7OzFQeX;armf34OovbO&=g~xsaWA?CT3tq8bA%n-^!&JINX78RZ0BWVn z{OQ$z5bzbjt8qU5PkYFMaaH|is((-2ckNB{y-HV8HkoT1W@3tdZma07b+#uyjXD@Ph`!xq0ROOh%hq9YX<1A8xAJEM5e>*9BJo5&X&Dd5O>( zA+y>}&JgSh_#DaFJF`rgC4nL0Iv?gHe|Eg_&5}G*$d=ujG3l|yjk8Hug*2egyb4IBg<5+6`FQ4CRpL}naDzrjXcAqE#- z1hhmtqdUW`)0W%&wMzX4+@rZn82R~^c+*F@W$#(oLq=&Z2e|Z2bf#K=7=cXl0d=%8 zYMyAtBh!JWt3ijDYY$C`s8b!y7m`?1VvKA=!2b~Iiz7V-ia zB@_ugA2{e@jc@TaeZOxk&fmAtcIBAAI1|94#SSEm+P?x#d0(^H#TQ@^N?tj-6BuP? 
zZ0SCfpOr3F*NriIr{J&M`Zn}UW7JEG+)BH+xJxERYetL9jSIG}7cTPq5 z)8h7LOyZpIZ~`1(6}(t0yhbp*1eQ_HnaO=5d%0q*>9eug6dpB~tG)8E!`V|G(cf~B z?NfF?7rk5c(Ey7Rc>mQrXs^?*#WM9hjWO1&ffM5W zLJ+}A|0NDk<2urs7=3!N-|DgiGNDFSK&r(eA@BJT5FGn^5tM0u!e+6Oz6IR6%oJWS&i9MP1e1QtqC z&*W)P7MG&##UMdaP*r34F+G;cfW_G&q9rNtdij39^N_ua?@PY}ehutlM90OvH^Vo@ zT%aMMA|%>h;+i~s--p)s=o6-5>5gd(EY+xwB8gsIE~j^mrkk-d?@M}KK^ukt3)zq@BIRZMjd>(fRRzBVJx6g*{*iU7k&J~>sF^zP3ILG;i+y=*Kam_6 zE%XSvu9EwOSp7TFMY>gRPUt44u>#G(c%yB<_37f$J+58OcNp)Mb~(x2*D%N%YwN)u z(S)Vq&e^J3(zDZ)o9ESxR`GUJTf&1Qd2y4I$?k$bcHb&M!yqLub-86z`MMa4nZha6 ze=0^qTUuBlzlLxJ?l~pf(=26sq2C#5h1%vy_t6R1YPL8dG?s~AgCY1s9*Tv87C1uU zJqagJdr(QpurwGri9v@V^KFrBP+x{^w&~LVjWKkS@3LD7c~J zXay^ND_$Y&#zx5he4!65Z6(1FC(AyfJTp-}lf8uo!K9<=)*K`KWdr$g#6o{ZJL5Ok z{1ubiacu}$gN7{L3+Th3z?ow5M(+xD8~!E8%ekN_d!KMqUSa+Q=E;>t5t~kRJX}Vf zJcOQ1+JcL%aCbC|=<7{s*&1`|V}=l%4?JxNCc$rTduaR>uYqP33Gt?o!1Ex8#s#xi z;z^3wYxE3t*%0Bj0Cd4vp_;{enbm+G7BP%rtK5=~%Mf&zOri8w%wQNa!V>!?2o$>e;ijoPTY=11}l(p3E?b!qM*$E z{hx0Tz%`^LvZ3jLAp4Nxd_|cp-|^8odd@6+G=pna)t7u8qdzcwiIJ~>54$UKmu}fO zB?^}te8~Ze!CwY~*xXoX>SjpIlHdzAL^v~TQn-GD?c5zAwL**Aoqi$vLO3fTj`*&6 zEwp)ztyf);#<<|#w_IrWl((XD)cP9)FVkJxKo$*Q>00?pv0x2EGouE5+%T{ig~3*# zDvUF5D(Wn@VAG^XWqIv~rXtcANC`SIk5Z^NBQetwMuZrWOkM<`%lC6j=&F)VhZYJro}&^?={+gNI)jmAf6cDo@R@4f{0Ctt z0la9NgXKI8-w8-c?iEZWwqk;X1^mz#uEJbTV}ee*vHsaF(j+ZYbmMo*s^L#}wIV1H z9y8Q%kNf!xs$`4bT!THD!Ozi5L$jVYZl#8qXf7-U$N}&Oxy9^YHV)fF+UxvT*@Oo5c-tFs2&H;8baVupsBi1wIQ3(DFL!0cg*VKr&$4f6kAgY`} zEsy^)dWQ$XHyiuyvj@-9n4U^M+h|I6YfwZBZ?r)27$-{!2AVuT~(DmBIobVYwQJZXDGr?)I_-XFBJmvZ<2|0mY8u7}B zW*xWcOqjA^n+!ZP)t9=@GCVJPG;RTwJ9SyHAAf*C7P#wS*2#8=1fOBz5JICRQ>FH4 z0|XpBS~TMb&{YZFh?+{be7~!JKJ%-b2VLR5{~`VSOGk|%uYMvHM$?7TFNzDuZu$At zeQGp?$5-a?!#$qDeLe5yFdWxfRvD@azr{Y@*r?!O1nH5M3~xW@Tj2lfDy`?)o7Ie zB1O)7&}k&=joC4L0UauykE2zrlkPPPSMAWeDEAqL$b$Rr+GNXph0QZOx5 z*1HoxA#TV5>s8X=D)sn<7*#1!(H$XGC1aqki2E z{0J^b8E-_F>{<7@Sy91hj0t8Y)$yd6IrbzG@0}JNG{I-wsd$c9Egnuc8=?b-c72|> zFkCB6m{zvxSjp}PxFk&0|NJ@XK4|syC&nOQQN0&=K!X=1)Eu`WO|=?5$XrfoXn)o& 
z9DolAfv-lb^zS73a}Wc$p`yoI`W9Tdh$k>QLfyYL%1rpo z*!wd(Ujq)ozO0>zL5<9Nlm%*WUlS&ollx64H zwvq9$1-A*&4pZwj>Ncssf*J+j*|L|KWGcK)D4z^CZ24#7J;Z=6Gt|l;;2!I+iOM_dRin~4{^E{<_jwDS{CB6UHi)nQx~T^2w50mEme3v%*Bq>w-t z!!wz6xqD)qsYB%O$bKWCRh~V458{l^Y47H50njGpoffZ3#;hS z_ZAqRS5c2NGfx@XBCER(y3g!dm4&$&o0hx!Vm&N2d3aZJulE;&dLH^K-$6!p^Mo=8 z14gXVHRczg*D%j;zFS3tyK44AZ7po*8C<5&Pl68VniOUx7$@>*1$P-Ymr^IFaETOn zM-xIx5{gM;8V^cW4wk&JCzT$)Dq`h*U&@1yS`B{_68|6?rJUAGijdKL$KEs^lMv_{ zhmm7-S0mr9-vR{s(f0gZ`>5?T{>7tml9vNXq1v?{7;5epv=av!uGMX=dTM(OqzLGV z%SnE?{G;OM&N7&A@zm$eG^HRY1sD)m&&uzjb86N|wM6j~NI$CWM*Na{w)L33P3MsH zpeY%Rq*NqewBUKfMSbn>-CvnOW8LkT;2-+qCMlt9bTX>S>IR)k;Pbb_uA}o8LFfwZ z&}U%$fn6U0A<86(phrThU&<1=BN?FgjPgZ`i~A$itq>Z9i*sc7u$8m4A#K0kGtN@? zT>##I_~w;lf8o`D9ZG15E0s|7jk26S0jN_=(WDyPbaA~2;tWVp6V@Ib^6VPZWn8VzvayV zuGBN>E>5;KldjsiQ0FT460^QNP7J$Wsu9^VyB|Q?%vqifJbVWaGH?AtPUoAlw&Uy7 zmnfVpUoT4}AkGKPll%uG6kZt58HUp^!u7U=Lb4u3 zvHHTp1MQzr^&FRh)-4E|0P)aj6d^Nfm6H(q3HV~(Sr84kJ;wQPEuQ0UAQ{nftjzhu%CF)EeP{QwDJK(W zB7S1_(b1pAlK1vEES*sov1&S!4Gr13a1e+?~xBjW0dWJH}t1Uh(S?nDvd_Io{#5wx~msgoc!NTBUXf_ z-~!4M!nzNa3rAKef6)X6I-P;o8bj^b2~oJ3DQNK4MFd8s4ft|`sJs2&{e7sav&CR~ z)yclcEm)@Cl<3HP5cNb1=i>FVECZ$zzd+FXIxu~|C+cR)p|>x94LQ5GAm&YAj3A=H z{ZAdlunkQDCwVOn|NXY7#9$IktTn$c>I*EXB+*(aX(z2?cktXUj;sJ9CQ+TiEJS%m z4KSZ(^X;}9hw}Ce*mi%JAGx2%@YR34dGDeTT#_*x^u7!Ew76_(=$>3s^UWr1nw9AL z;m)Us(A|q)1>H3Tr^PFys<5^ieMwo@a0vXMP5KNr>E6fQv&m1I0AOHD2CxA6+!qS15u zLs$aY9+htcwayK~=p<3C2a9Sd6t>^v2>i-D3khLxvEI+R4IXjC_Z1Bt*}=7Q%zZF`x^@Ht zz{Dmzp}JN`LCNBu%EVtBrdeSgwT~u80N33|dHV{HC0j!mJdrf{0>4`;oSfTf_F>Lug#p!wHfXV%hMprLk34=9~S9&_dTe z6SyCDQGt4OSJ<#C!TIqQH5~`%!P7ftN&7^{?7CWE@~7mjt}5aC*9n%#`tJ&nH4YD# z9tUrYdp;dLdvn8f+svBfy3ME(yb#5Dh<^}|%s+=aWB*DjCkPgr*Lbqnr1@l}>G=~M z1I;HsF?N4d_f%r$o4PI-aSJ4o*b;-k5&CNy|=09=1i4k8lT*bp+y$ieaMb`WT2yn{o(OyCTZ36 zAhSu)M%{*S5&b2{624s*+OmLhOC;7nnyA7%JaV#0$)5>qCSB<&dv{l&;-puAt!o0e zkxUS6Da1iUXD$%j0XT3VrouTe`wkdZW$W!-sG_nI2V-xpk!iHMU8WP5SOz}}vqWR( zv*gJs3oZ56_Ex;p(}iUlfL1=rAleSjb&Qapd=FD^!~a>gtZ#8_@>}i@>rM%uiv%#W 
zzGs>f#J0RtjQ}o6_bBf=UJn7|9{IL^ds3rFG#c9!J}cu&2$2}8&YeY?EK!WNX2`%4S z8Ivm!GgTwsgo4_6XiGtd09L~I>FR5FOt6y3Io6QL`xN3kx2uN=b(1UZb+_o=uSa@h zsW&vYG84p&-dNXLarR4AWDGpq*kk;&_1J(}&nFAUQ4nMC(mJi=g;c!LoDM?!?fmU3 zp3&RwMUJEST~e9-l03v;@|Pfo-M7kUhON6Yo4&Wq;oGP?SGvff?<`Lr1z&$I>i;5K zxzLC1mA#D@clb~F0b)Z-n1130^2ILx{)7#io1D}#wNVC{Sgo00efqOIF3kY|w26gH zNW+?C>}ce>Hb7e#6N;)i&`2QBEr8sPplSjh>m%S&&j(a|K47mdW9Q-Se!3tqaVgP+ zcOd?|SWlE}kstOaN+tr0tUZ>PIF3_O_YzQBWNlv0aDmI3P#L^vG~->7l(L=Sq$7OK z5tfpke-Vf_@akzkr*BX<#9)QRg4m(nbaBy4d|cE8502zuaSWtb=<>I+xP5DxTEEWg zGR6oJ7D92qn^ZXBOwU=@U>dAFvYa1ei81AL6XCht?mSYGYXJQYZQ2vhy`_ZT5P+c-Sk$Nu^e1ss4o^`+0Qp|WXO%Nq!GS0bcx^D(SYQ^xqA>(YGO3aMTnKm^-qygj?~8;7)4wg8Ax1T-Zrf}yj?=Tb zFE%{HH1;hfhA2;2a3ml^7K`^{rol7*`Bh37n|u2|Z>JzeZHAl{9|ydlu<((%$38<3 z8!*pG16d%HrqnY!%^nAV624c8tmkB8_-&r4nJRMEC!R_iu0H09!3rX0h#`}aCiCkK zbB$o4uT5eagUuI5>GrN^G^K?%-mi!9WmDEYwu}-uGHs;pH$+BG2!3JN^I;qNee!}i zo4U&J(TxHLqJfE6-!5OFSqC<$;Axk$7yipvtze~?dgo?wn*48*`%6Tv!;!EYsDCnx z?>t&x(XicRko)Ziv*a{dOkEV_Mb}~Q-?N6y^&$t7k z?x!1J1fw7Ff<`^hQOwN6o0FGAi;Fmu_2foK0el9vR8qh1qa9#SmiRt&ZB*eI4Nwp< zHo;flMTQHSI$DR0h?cK@R@Twv$;norby~4_T2n#KJM+GYwEf};{8P+>t0o`_f7ek<$-v z5NK*3MV$#UhTsC=1HJ}zZ_mr~=i|Vofs;(B76dlB0)s!&9nqXU2P4CLpqi?f|2j_~ zCriOg4|2HQ!q?@tH^*@y_aGJvmly)08&CuZK3`C}nvpv-M*pIe4-5sdptzva^LCmo zsd8gbYVAuG*I_@)d2%SqTsSLLni&8OWCnWn1TF)KsqfBt1W}Qp|G6qYKHvtSF)&<* zaL9bQ+0%m*+oH~B+7$g}U%OJ_)FW{3u}p1GRsH;Jztco({e{V_-oD>mn1`*+;>{B! 
z&krs~esXSE{E0EJtRb~ZN_j1k+Tn(g%Z;%$qEUxIrQQ!h%ws)EsvFD9U*2rI!Cu02 z5{=Oym$2^WdGWxG5LK{ca*A&bWpj#ePZLT0O?~V?ApZSnvflf!%7Zv2M2-dhoJNfd zY3GgMba6&5)z9v*riW%dDq$uajPqRdA5JLq?FDXoemC`caEg0N9b{kUis31^&xt*U z&z7ivs>R(f4&}+}_hDA?Y3F~A_;zO5J0tP-T1MZ!I;`ro*tYF6|2#4qkP<@s|2J`6ty&i<_gNhybNoqAoBUj1l(=y1s zP=Mauzf6|6-qX(dB!ah0?djz;4CuOYn+BumrvD3=U#{;Z zGxJ>utv-)Y&_{j!z}##ZUiUYjtE8GLdDhCQ|HRL6#`UlPK3K1mHGc?qV~eo` z3HNh25%l`@5`84;OqtfeWYKY7IXAB z?QddkK&MSu)0AEMqpUggdWnYj=@L!O_zxF8spcfBQBM2O{%ISzd|?BS?H6b`Xlsh< ztl!|ng1swG?Ec<(HhQd_%Rc1s;}&MStovI+J*ahjy?Do zxKs{-#JAux{faNm7PS%ps!ht5GT>;`qu7?aLz;5+>c2xsM9Q6<=T-&}n^3-afr|9R_p5{Sfec6TSY@vG$;uL6l3!@wD9ePblg z?+#mro#!gmo$&qbF6!$Q6$U}U z>hB6+c!o>ZI367w3W?ER!6Bl<3C8+ih5&#mKHGn3f^PTs*yQCfT{}eGT zR9IJ6SNVYU9_;0+Sb|kXbkJy)xkT53TGVVYw-H>5G%D?6)#s&Ib zoiS=)D1;He)5ufhus-gJp*Il9TyCP6e)oDavi(3D#J-|efI8%R5Burafl1twP|;-E z<%CtvguKYjKDO(#tRI&iuSg-9!KacW+H4zZHTt68AbZLA0QZHzlt4X*aVCG4Bj}=f z-3X{cB|w;1rQfZuQZjGu|EWVCKoBu>XqYn%Ds zTg1VqkOC~SF}^B{J9mFgR#C%uM-FNTi%c zA>D&HVerVRrO@Wek;v9WGvJtQXQU{G4K_g=1f89bK~7BUh+g8YBJcyw0~tP__%6XN z7zSkqZeacYAo4d(B@#4UmA|tVPXY17y^)BY6zizqp?D@dP&J1)H8nAVlZ*~#8e2yzkkXIIRn9Qv zzX7lT^0J{2#$qrkrz-+Q<^2ax!=$4k^#XXXH&{Q4-s}PZowyIUs;Bt7|D(fPvBZOA zsa!V%e)1p=um~Pzg6du!=$CMCL!{kps2S$@z==MUK#9vKjCqT_i!?>*N_NKwfrFnt z;COu~(&`v^pGGE>$OVP<373XC9|Hqlf;FC=XsSnQO_Fl5mmX%wMZ4LI!Z5U0f{cH8 zx@3QP5EEjg&%rhrbr5xUlyF*$9o+Q*EHa+M&ZE>8K|v>f2d%f(exZc_-aPL=iE|Q$ zst|Dy`YT59jRSG#?J%ORR(emE5&5e zf6~HZeE&s^a)#VA$$#M(aVsbYVi={`m4VzA1I$}LB`$D3*~N0LSj~JXEnKH&kSB_u z68f?s2B`ZUTk>IbQbEh+5lR7!;m zaGKMQS=x$B+h|Qp$RnIIw{QQn_QbUQ3t<4G(Rpz#Q%}bK)$jU3bu2!HTBPBQ#bD## zMv@a%9qan~cRW5;4AB8tO3v-#RQ(0Safo|KXBEU=vu2hIJeg=26zlSiFg#@d8ACT2 z{yExTj1`tc)X+4;92p$CCOk6kU@+W4vOGt-rV4&h*B$2UZBQ?i7Af;%vKcldbI7gZgIKZT80xbp;X=yA*KM)BrqH2=+8)6AS;_ z^>U&bpaJ7YLY=u1vH)C65B`2B6^Wu+aK%!bGx6VJnaI6vIL6u&xE$`MeK=z}KsRN$ z{Y5)${H4Wx=f^Xf(a@pseBUzPw?1FZObeq{3;x&-2%A%!PTt>@T1Y zW+p$Ac~-hFqMq!a$B0i90EVbkYOF@AfSHX(p8SL2jt+YnpfR=vCgllqQg?&QmgN4Q 
z&}0mj+MS$is;erjf(!SB^C2}l_{O;KOfU`_EWN>FJ&L$3{6P&?OyLhzxb99bdp%Oi zl_O$$PTAq619~mqQ{?^Dt#hCQ{zOSzKTC^?@aFs6(X?$3j*kibm}xTq+^+QE9g_c9 zaq+ozRF9;H$((NES^-xj8|SRxPr2{T>M9)AxZUPNd8Qz}mft;ge_SY*yVM~N9dhrK z=8LMB@<$$)xkfjRzNc0(Y|z4Qh4aJ-E*bPGI55psuOqoalFC&xEiJ7EkclcVu`?b~ zVxBG|_BtZ;_1mH*z|s7%Dz+;(B6&Zj3A8kT3&+0HGZup05qTPp4(LQG$}p*8G3i^ z*Do26z5`n5Ut75g6a5pUDp1k-l7X=~iDQOI={7t6#_Rc}4-N*Tprm%h`D7yzrDT{< zEdA1FH)spEj*%{(t3`4~*=WwLSU>1;^b7RK!BfW>aKN$K`T`CvZ=&1?^NlcP4;HEo z4G4``qb3ryG#>V0mCMxhg*Sx@QeGanHUm!w2M13;8RBS!O5HPnX$s@!+?fCNDJH0a z#@xECvZL~+TiM2*dOBgxTcfkm?Tt)(Jprv#0ZS_yZRNHbV~2d={UEcjAr?y8cEQ4- zw|kX*C-sTzp`suxuS3kU;2$1IF`kEe>ADCRtYZwQHYaBK8kI2^`x-wUg8JZ+SOuAp zjFF@NDwvU3xDlJ)m~Z^%np`Wad^%^j!aJZdNHbJvbe$A4LK00iy~t(QYgQBQw}BpW zC_4Y+rqi+xn4>zc^*@l0gGoCSyeMpmSxI{Mj37>jELtCA-wPFy`?JXGdnRrS!ngC$ z>a%IEE2a(BBJwt+w+{r!*M#DNFnH_AGE)6uaXhLI7TpxiL{pH#R{#VZ4aAGATeBeJ zzaS)H>BxEN1DH&nQ)@EIMSgr*n^hh4xH)Ngz5h}bRT zG{;Oh(gzZSxR?BiUluUzG;ITY^B?^2yuRo!_!^EmDDixxN1;-GWLZ$hU>XY*eH0|W z#e6PLvQs5gURM+bq;N`gWH&7Z6h=@8{?=dcn>{h1zQj;Hwb2%D;~IVt?h;~_+Q@%* z%k{GB4{?fpg&7+UgDk74Rr1 zXwZls{cx>Z ziJ@)$>(jLThW#v@u1SE>=)QKO$`<%F2R5^QhY|vhg#we#&=CMm7Xb&~cAu!=-{-wi z6taEP{*Yb(Z-#kL-~UTjwqcwQw(Z~X%_wE*SZTpJ@2DH<_)MvIekoRXh1wLh{mLK< z_)yW-t=DN{%g`wvc+Z`oswD*`F!b1F(Vg-<8$8fMt?)uj%nX>`aRm~i;XY9;Vn4p0gVXeVH}QDp7Cka>5|WBwf;8v-(N@ z-d9ll3|e;j;$h-eyv363xsvz{qGlH@DnJLn^(0rc^}wUo z=@c#Y(#Jp+@06=+{ehVj+*#%GQQ#FW_SY-5VA%`jf`$Pd8Dka+=Zo#SaI2GCH_ zzS{1n#KRr>uY`% zT-%UlN0ED(Xgw6Pd|%?Z3_Zm(4O}kQ4sUP_iq)_ZHxxys1&~}whn&ORcFiitr&2)| zLzUe?`O47UTnEH-B>uzk^3CIX%*Ngr8q8M(_7qZa4=yh;q|Aj>&doL|DGPKTzJKkK zgKORZH=7O=s^Z>=D8EdXn(Y;^P}F0ySSA&(;ma497GIr68gCmu<6nPS6IOOmqMXoU zsU>6A2XXl9UmAUq7~?3Mx877X`Ff+i1>xw@RX_Wj?-fu3h7H1+M5ta+KLW>>C5f`Bpt1mknj%VSTt(tk zK0I{m2RL*gf%x1jcq`C`Xu=I(?+945rCz!1%|fwn5V|{)l{tkki)|1aK@PyI(CfSE zj`oFkzxUYtF!TTsQdlTddK%2CEeZc~_dzTB45c-SIV+$hxNpw{CkQyu<6bAe`c57X z3QI0LwknN6m3HzU6_GyCWKTj>7Tm!7*T@t{Z6tky_wJv*vqTm}XUaht(^BNay?M`^ zC6Rw15fnnsXcV|RMq`QY=Lm4FrVjMPOHnIdOF{qUV2p;K{hq`@Lw66Cg>f93k1PB- 
zog92+3z$E-YayW?=x_ub7X1;vL%?_k;YGvmKr2Bxl=zqe;KJb`Clu5w=N8K5x`9rN zXTqsB@Hgnttovf=KIUwKDOy6II_FISuqa&-&U*$dSGr2;k#z&;=(HnHV116wQHrpi z{rq>JqM+^?E75rZc0;|(FQQ+$@4Is7rBnarrIHW-r40D%AD%GrT93lb4@~|+C;_rY z84yP=s|JcnAc?~zY90Eg(|IZln&}ilN?}q^{Mmj$87<9btSBg8a@n1J50-Ji{MNaK z>|TxOF#!0^n7u$3Xg}BR9@%0gB6ff>C1S(Pd|eythWCQf;qpBQZlE6le|~4;(VZxG?lygdcco6|MfIx5ZWgUzDBCkeY#{$Z^@YcL+Udb zQK0kV#!>`k$a^r{^IkHcPMJD0PE2mg8}%V z0{>f_9bfKUIw}%-K>QE(kv`I?%Ec!p=d)u=MXMpr9tWiWV=81(!x%4gfB}S<2dYt^ zBC86cJ0=CH0%J6jcPLvcS;2_f+S>7c(a%;RfdHqJ29?}Teb+m!@rbdYm5-Wq3vtRQ z=KJlZT|mGD;a&Q->-WbZl`;lCCu0DKx;$FLmX09$nygfZ^UrGl)USm8=?MT%txS(# z%l`}Q(6Ij1Ie!tS1x_m(bq*$PoSX<<-v9IaE+Mv4Rnm|-pfoE0vyuc`6TGjddAce9 zP!W8v2ihG9KD%5{A(789WnaN$quF}KDomtuYSsDR@;AU|Vi>rj@=o-R*R0^%28aAX z&J1k$!8@z}RSv!;Uc7O zS=PO-6(az_fj=upO14Iy{+9yYGWgrtf-yrg=)AV^Z1g|3(>4$QNklAah&DEJj3)rl zndw?umOv7guhH-qR9iWpy?Z#;>gQ*J7j_QbW&bEBvdde(0x|Y0V5G;4d!;pJ>l~$o z)e!smOO*~$AwKHgJ3FPu>O*LU3svdW`ABwO8hDMZNXg)PRe+a!6reG|gc#hA|H&d> z6F9iJF@X{TN@rXOv35cn0ME&MaFkeKyrpYKw#SeVZw)X844sg}U-|F&fB4;z}>w*_Q(SKvS$qg}t8&{&5C| zRLDz`>*ZFYaleefUx2z$t0BP`{6SE0Di_*E6Upe_UzP#*Scv;PFn4`yFbw*5YtUJk zD+5LxH&(X}fRP`cpjZWVQ~Y~Q&oC-D0|b})>P+SHN#b1z6juNg5>9;ay->X%Iv;4` z3>3i0Fy?-L4a_6+MJXNW^@V63++<0Gl_YMqLFIv!M3Q-yybSmSIfReiZ&oF-{}s*x{g-A(i1l;B(ay)Gd2+|LBovz`WSn^ z*L21-71)E+-~N4tAVa%r+A=UM936`Yrr07I!YMe>;%_gy}t-&L}G(fW4eg`ckt``RKt2 zm-FGW^is{wI!CkOfzF?ynD<3d}2?D7Qk(o@q1?!y0`EhYwS8y>Z=wz)<)LAUor zsVk=oHSC`YJIRGgytI3hYng|Md+1IX~fE)$P>y#K^Z4d*;uw zYM*#NJ7<1rFpD7&Az=8L?tky|SLyKT9y}|dvPIbC<mivWFs{0;c}0GYl_8POnIfp}2zC zu&Hj+9(cO*6ugs-Xa~V&7zll(#cbhZ+~)X$H~@QiizH|N!i0=yb^9{%mtgaGY)TGwYNpM_$7UI3WFdASR#F%=!}BNk^sEMc1)O7=pf zc1+DD0A%wigx^EpA$(<>Z$fb>z(&JNX;F2HK*16~bz}p_d<0LU?tL_2c}MZ!axl@9 z$j;V+!;J!uBvyhi=qQX;UgUQ6;pUO1=IDCfqhayR)RB!@SG=P~{`~j>0lWusKx5>S z|ExPvwItUMO6_)NC%#1ghuSO?7@D{GT&LU7c)*uB!gB6nvtmFFuX@rl_l>SMzTdbx zuW!)ztHACE^hU{Z5x{e0w8O`*7v$;^E|tgGJNEs-u!eyxZGE? 
zIQ0MJ%EuTgc+`1@^R0dWOeFSi`Zu9yhoHcg)br$}naw^vv3wq6w&;AKDInaWj;scv zP_hl1o0>KN5WKp(``V^+)?-Mm_Jd_ ztty6HK`Sh5@cGHftG5t8kL}0PV^=6Is8ti5p3N$suD@rRn8iZ({K0qD*h~H6NOMMd z+7VHW&lE4@`pIzr+2?D~TPyh&Pgnz(;kZx&dITNJCcXjwOHx?m8J(FI1|=3w^FcvVcn*}1YKx-q_Z<2 zTFvxedFykltD~P3mzRe^(yq~Rhibj~p^>cb7A9zT1ozQ`f>=knlr#HI9^ZP51U*rf zmcMqSj%2Y9V$-NI*z@1TiSkhm`hIeBMzQs#&Lq?4gI;j!=BdA?+obe^Fn6`Y2$% zzKm2`lX@Tz@P14o;Al zKvK^KPgAu_zyAKaMY#)3?{^1dAQoRJErNYE!9?1Kz}w5yVx29XCq~G3J_F@d!+gC{ zn7)C*CLq6R%)b)E$us9_0;pcM)wj9uHu?=ombDzJUZD!z8#lKCP|CcT9$)<$Zw(T1 z@btmiyJ-*rp5%KHuD6wab`erK8Tbt;U55IqiC3yKK zZIlYQZHyS&iPPzhc!sG>+~{Q@oTSXctnLRi^=>i-)TQVJ8%?<~i8LR*o8JE<1%=6S zZjw@bqe{EW)B!?MK=h!9GMKN=j^Pk{#PXYl)zstRtI9HR>6m@Co)C@e>Q7}asBf~( z>j_4T*awLuSj13d!y|%3ShC}9FHXY`)a6Sv5vnki?eEj05h)m<6k*9Isd->Iwv^IG zo=RN|kIqmW>1WAzamW#3kH6SVu*dn_iCX})*Vt%?lx%-NFWNTHSW+@j(}PR&RB9101KBb_D9^Ep6ot}Cprw#e;30{` zV?wae?lXw>aEe#wZO4Yi{-xAZ*{h#4OXU&xQc50U891+l&{5%5xfTFTY|fuU$}A5+ zuQfInmd6(XG=MJ$Q$O6rqgDhZcjd{RhhLUl8xfbb$lK29!MKUBACg4Msk|-j*l5DR z7r$&isb1Gq22r{=Y;G9M9p>r0_^Fm>BF*gddrZ8_B#!gJgf$72rId)!%p;=_c;ea_D5s#eTS&SloE(v98K*BqQa;kOk1c-2?PlVW0 zC11&5`t3||soqja2Ia9iE_Z2svDE?9AZFH|CgK$Cm-nKmgfz!OYDu7?76Zu%yf&J2 z7)0#)+TUtCC@2_cyN&0T-e2(1)Ze77#i;sSofzc4KliyvC;rnM&Wig&ty1eix!c$F z&z)(prHAzm4B2dJKU5;T!&xj$Z+X@mh*KecucjVMiySe3UNF#tzZPpaNR72y=!`#2|ILSKyW$72{c{OY zw)fo3q%UX|{6>@9jHDxebKN0U`&CsWO^iCZ63NkZ7*T3Y)2Bm^p4XR>Okl1uI)25( z1>t1r&>~%BsiLx_azKc-Tq>6GjprsmJE2if|MmT~R0fK#-mifV^x2E046~BKcz_VC zyX%i%-mTJ-5B!3IPc8v}JTe+GXOCXc8bc!%u}$?K(fo^cxY5m?{E5Z=W{=}gFhO7u z&0dd(teKGAX4Z2Qo3B7l0X=VkyyXV{h3FXGoqgnadeI&LzAJ)&;obwF;>?YfdG%N1 zP8o@m;1+6?8=9i}fX^i9U9R(Fwdm~#N0&XMT|2Y3roOHYvEjv!2kd;G{n#sbp2kWn ziVrmc8xfU!v^{fxxUIEBSNK@Qg^4Cr&^!WdMx-X&?k3gcNeH^;#_w&t(c z#c7IM-CoHAUrq69v1skS)o5@-9eGbjCsYsPocO3FpR1ykQV&cCImE20-m!}5lwoL? zW&3rN7g?~molvAg@GwI}a-y-a=58m0)uX#P`t{j)UyvIv z&qrkYA^y$J0sP^+u%zk)Qw&=vJEb5=-L3w2L*0 zOx4X++CeFmnbrn4{q7F2SBQmHzYG;ApS@WzX)5*e#r1V2SS3=90XA;ZDJv_hH*zX? 
znI9CsC??JWk^=a;HJ2CBk}3y2XaM3sB((YP)ZQ!yQL* zpH4_(Xhj}uv9vVpD!36rZ~9V?Po$vhLc7Hzg?5+xG2IFL3f`ZdkZ33nG7# zqgev4k8R4)-xK*mXdZ1ECCHmVWFyrTIMj8OwLWn@OQ{ZEK} zN{$-U+g{JnNRmIeJ(qyOchBxBqU+?sz$C4DvUwHblZn@4zkzp*LGMD-@EU3!xLp7gIz~UD~ zz2@JW{;Ys_0h%!)vBe`;*I2z}Uu-~yd$mdD-B0@URg;vTRKR z^B@o=R9UfFtLwb+0)gi(6Q|WMEuh-7F3NO3S%mQA(5)#vq$0Hn?>-0NOcgBQvj%V= z-5$q7Dq3=!B9|roOkNT8?B858h(mBfcwZgZzC5;LQK@TO}UUl7poa`>`q zen5fX`RCfB&CA{351q;qsAHE!>Px-v(@~mrJ0bFqTz>Nd^eo%RC-9SQ>i7fY5$b>! z%wkq@lszpV4$Eeo9G z>`B#%MBhoJ%IyrCrjrouX=#&M@Tw9^6$1llIHWNRVyCXgW!tn_Ug_E8Ov>pSE)^<5 z`kAw^K+6*W(+xpG<8*+tV0|e~Nl6)|cD@>;qK}3K9!JxNQEyNttQYTjg!>Mq36bP> z`p*)Rv4KlZRYDjzD+^$6jIhO2;?=7~i;e1>ay*~s(92&~78lgwWO7z~!{L*@gl+bk z01~qX-UI&T{=(Qn|G)=q ziDLn4cum+Fq`z^ky#|Brg#3=n!G%}4CE8qvbMIl&_W)u9M}*xkH}}?B?AZW_MliO4 z4TW7g*vW>Rkj(+ZMX$(_dWJhR*T~K-UzDvG+J!Rv(0D+?U`N+ezo|8rkW=PGml3 zSi9x$qmKB#*%5TfuD_o?!QqP}y7Jhvd}^)dVrEwN?JUEmb##^B8?%0}11O;Gg^VjR zEQ_$d-};cz96#6+N`^t!Y1TKR%=F#f4 z6IazIHTK5G1$u+~*C;p<0GvVy z^dI5U?&vB|P7J81L?8w#T2%wfUO3%5iPUbvkg{RsZa0@2LHVgP5wEA>{Jr}^aJy3) z!=M!*_$eT4?h-7GXE$AP1#7;Qcs#j%@4|g=HQ1}#0d@D}bDWQ{A!kiob_>z9OUAdS z4a;J1xu<|tLESV*v3zj4E_YsEx&QS(-so>hK~{`4W7-9EAyPReGJZ{3FI2EqHT|1d zjURvzGbcuH_gAVzhw^M6U3FSgMx$0y&G(vli>_^7uq{<1ZrbBeo|D^m)@rgWB zlKbgCM5;%xq`veq6jLUupeSqe7e_equx7ISm=+*E^I_?2_}&S%=6X$AM5ai>W9mIz_d zdwFZ+^>l{8U)m7vNXH=3->Kb6+hK-EOlAuZtP*QTU4EhNXI215M}W9mP=x0-vjONZ zSobo}Nn;58-=dw--)j=g2}Cjv0tsVsqgUfT-t!}ES!qD(DLvlXj6@`jjHzAxP*UIs z@%iwHdO{a5s##DrQH%2{fM>lbt>x*F2z-vd`je<7+MM%+gLa+!;WCZ&0raqh_}2)o z&Dhq&3D$)>jty>AY0HN1Z{5+{XCc+Wf*-XP%ub4;S0^T9T0(j8D)89J$@4CACqF&M zDo>%N&VH12S`2n5;da&b%D752qFeN?pC^zVSs`)C7TDKS&1b}QD;q5Ou z0$l?JiEz*LDou<12*)7J^^$j|ABJiFI=7M-x1(_*Sa+FxWz$!7^~kV^Kg%2<+c9%$ zvfBOxb%ss_G%3c??rekNsge!rU~=FtxiD<$P%2Q46dp|**!2_cKE{`zdRyoQWPmOJ4w9 zBho7s5QY&6CnoI`2T{_^AXkTmY#vemr3w|>?qP)DQptn90t!uJ{@g>Hj8Y~*%wecl zYd2@gLyjfxKmI@-C|6u;CW`N9?x-287}k~&S<~%FZfM9VZF{|~ETqpq{Tx>=!84vT zC$1&lvo7i0Hbi|VJ1v)!3x#Kyh#tqJpw 
zzAoQgTtvKTq_@#fDoNm`#H+)J0|7lfY=z`uA!yEYr7W>Yly}<@J;Fp^kj2?fi-8+)AV|T zOL3+IVjG33LDQ$mqG=Knbyj%s3bQDQc3}C*eG}sObUI#lUQZ8?aB`PIOPk10>?5FD zkGO+m2+o9&^`7AzLz{WsOKD@W-S2B_7A{+0qLDGg~Ky6 z9cDTdhx5VD0VjC71Pnq59cg&j{R`Z^MC;oMJ+jiKe+$~6_p)YQjlJXyKQ-cXm>riF z7*cHQbA@5x83B`?DonGqgfu5~8WZ9rs?)dY9$cu8px_bUy|My|A(5BxE}-Jl7z?do zfKt1JfH@q+LLaM0mV#2}4UbwyAZ2ZGKa@0|6EDpN1!lOQ9Dw^8@nb^cpSa{X7L?>6{%3UctiSz-;PVI_;VrNXX3E#K8K4osb;=o-+9DB}dDHR0}W(Mf8+xHher zm{|3bRa_7eA!MjDG&HJvXj42u3QMn6)jq?Sdj@7MN7jLQdIJ#M3BZdZ*44Ulm&?^V z1LjE!+F_(tLg^|ICY(wRBrADEcnbExZ|@)y#m!B@qyZA3LQ;m{ERw}hqD_IT*~5hV zs!*Ub;qe6))?Fe9Vku|zR2Mp8W4eCmE~u7}6P(B;4s-f={L7Au0)M}F2LURuL=-&% zaft|g=n5W}ob`z9LLh*OK*9Lup66lF8e?sjBk8!8p;OmDcnx0DX2@2TNCMr8oV%zQ zctD&H{#>voF5V^Z7H9%dJeU+^0s}qW7VrxYA5_4CCTa6EP~0&CoK&WANd|oEU{Tau z9nk!UAV&`YEM%cfEfh5vJLY-7c*&UU*+fFLXs4|mVa@o_a+-$x-FDEtt4 z)zjL*&OZ1rMaUsQ(;ZJX$wkDhWC87#2lyB;9l^tE2XxQkv}kgN*dy4X(^5czfGy{P zQAzGHHy#Rk0Wrh>2B!K5Hbh*M+?xU}13}<^@Y(bckEr(5q28g_=W(1F%O0kWwJ zq%kjH&2ykU>Hgj;}h1Bd>UUd76VnSBdfH??4GyFb@6K-rXHiRskE64P_{1V(k|BGU{5j| z1#RDZ9Lgr_@Q_`zgfxpMI!6cOcX%SV`CZfhBqA(m0N%<0{ulSwUyX@cUd4+8_1Olf z@R#S4{u$M`M$>)-w>Ck12NRPOCws45{Q%#!leHAUg?dHjuR)IQdP274zmXr`m8}GZ zRf6%$uR=;5b@<)>G00VsdF81GUE)%PILawR)dR+=*B6&K20%iz0Y6Li2uHI!iVpyt z4m$-RgiT4|AHYyNLXlFA!DgR#Jhq-5OxOf(pn8|TdcYU^2aF~9njWGLVx1YwvD}xf z1b`Xz6;8DPb$~rh>yJ?jz0IPbNAjt>qIqX>`G|qkOT}}!3`5_LGMYMBw-}N9I~#t8*+5u^++S$mBH_roiA$Z zLa%R07XmAbiM0;>Z(b=6QuBHy5m-BR!MyR?9B%>^Ub%|bj_G{80u_e15{eE#zOw-6 zA|1uLe<=gY0<_@8fgLa=hmT#5v;O37Cdh>OH+0}H(G5%~@G#SeGcHilHmsrvm5K}*d~ zP^r)mwl*W#ccMtSI#_Ly0a+3N2>Kd8tEPHB6vqZyS`4P{JJ`hhqc!gA|K?vzj79I5 ztA<9m{Rl-DTR(!nRDo*d^>DU4AmB2GE4F>Ycr~P_L_#f=6O{Hb_M6d2i6F7l&g}Uy ziTve4owz|WuCY-_b_ftMV z&Fz!ru?E%!|4gwa0>djZ7fn@i*qhZp7!|8sk17n7dpC<5Ou(H|eYGKUIlf2CCZ}B| z?kQLb4K2JABiM`(2YM2BIY&KnuK}{p=#KLh;DzJZZyD{%R$#G?4%YCq6X;DbX8vxa zV8qc-w|V!owpQA5Ui1TbuO+(={+k%fy)hiA+Z7IGKrb3&gPj{6_QapK)7}m+m)#wN zTXcdES&As!ON*EP%Z|zZd056cDCe7f{GG5S-%H3txp+n#IZ{g4{pVjettTp+>zMr9%@hi_}E!mo{ 
zj^3ZS*up*HnJT|>edI1I_Z71Wt!=Gq`L**v@amvweyG1a<$Pndlj-Ew4-)d~b4UmS z)EA6!!7eauqJa#L=T5m)?Mvu<79NK(P|O7k!~7$4AinA8=@#KlIHitnS{2(yAhZM& zd9Wv2mENpo<(&IvvBkPmQb(%s+o{d%K`e5HU1;P%W!2(#H+8HGPk^6)<>Ml|XLGo0; zAgOkKSc^f#&wL#bQAeFZ&i<(|>1${8kd@=gV~t!h>iz|`#+5WuVfsdf#pPufu`QVB zH+wHY+fR`(6^l3k@id!Fr%DCHXz5K>*$QMPEsO$|9pM9l|ADz|N+{=5fSfLvskJ9e zJ)8XV(4VJQyVw>`sIl7***a2u&yW^I)$!-lKjtP{G@e*8{^0v^_*j>HLGjyjgZrZ< zYxCQ+2_73`oD3ac3|6m1NHTamvwMMwOKqgp^E85f>GieK_fC*A+Ik>B0j$n8oE0j9 zL5-|GB$H5xYvMdb0sDFaCI;kzBM+kIUQw129MS4?m556%EH_mpV8OZ#EQ(Y?2~h;( zbtijsLR)|tVRS|?hgAoPgddP{ygGuJp{1-qCjr+=mEsowcu^&;c=&{n1mfW98zxvQ zhi^iqBCDs%Owx0<7Wjv61^M!w5yN=WlQM!=6qq!_H$UVzLj^xri2KBSe zi5MgKg>x4D3x_dJb)1MeX2F}2D`_B3fOdFZF0;UC0G+3zxYnGwU94UQS%MqsU3rXz zMDcaa61a!>Vpa_RH83PXszMCE3fZ%Kq z-E=4N;4-ERzzuzOLq|;kDRs<*5B%Wb@`8Vi9Fw&pXZWO zH_9$+B#wt&gVY|RXkU=GKRj)5u-MMyd1AHX-S*2JgH}{3c+Vr>+TWp6NC9y`#7_Bn z;e{+@g%b`IKr|>(xlZI+Ip#!6>AbSX#c1wR` zg5!0|vy@vQx%aX}fUr~#f69kDTfm#WL};CxPQct;TO_X4}G90DeUPJBDKuG1CTyfWATk)QL~} z=7^ww4t6nlOm0;LZx*qd|6F{4Nkfz!a|C|uo#C+P*xueZ%xJw*Z7sdB+SQ@)JpTRU zr6Q@(SQ~r#{H>_0fx-;0tD@j=5f`s+Q~L8cKSk^1-aH+bM=C$Nu-=L)$Np*xB``?q z<>flnQrqymJWFFWe{8ucWK8m1!CAzu1OClf{?C&|R-t=5$D9UTY z{5)~&VbTGs>MVx zyfR0SOvLqNVekz*ziXmhtEUuKLVc1-GG3{6DnZZ1Zl>t=f%V5vHJ$o|-4~J5L~n9% z(*x2zM+pqJS9Y|y88u74uq?kHyuB35PPBb{+(GefJn`pg;rXv-=bNcV@=3JpN+pJg zv+tO2C(jON&c__Ty9zqol)br5wX(`vu37E6#i!P5>)o&h! 
zX#K<`VCLOXu#i(i?w6mo#IW1)t;4 z%HBJiQh+iMkAF=IX&`z;n_m37U`UC)c^#7Gcai{OUU#?4vz5GkBaJ4Hx>1bfw|VIF zhDIn%ki?~cuD|}?lVAK4fqtNc?(iX^bX#is_J7?7kY3}9re{bnnA{h5l978ZDj_@K zXcV=K4krSZaj?9Eel#P6o_h~e-x{AZSR|FGIXF1v6WMeeq$;n@eZ0Mqc4*my?A%yZ zrJoyt4jlZlq{40zjVJ*ju$olnG#AnUElx)N>pa?XCrKR0Gvj=y9u+MU(9 zt_87jvW|oWo(nL;X5-gS%Qu;%FUaHN)c~P>I6c!#vr+u6jCymUGQ-8-Os)P8bwabQ z=i#52eR+8OjQ2=OPuDwGpl=n-&XW24yHC+i^{Y(I3(cH;=Xjy*c&gN)kn^t$iRw$P z^+n=Nw?v1JX7+Mt!e}B2&v`=PRo~RYSBX~6I|!y}2~m3XJ~bufd6Ub>aa1Bk@dhn4 zv>#`m$N2)AfysLT8bxYY1w!z6z0LtJ(VE}XEE^5j?sU+5%L3tO9)QwyS7HxD|G`N8 zu^?!G&(8y`-MwCK^Fb4UIZ&b=0;q^*3v7OofcHck)Ec&A3FlbcL;XSjxfr%6qZQl= zIa%7~`>0b3la3-%-N_k`cwBR`dWSW)CZMq8rXR=i_Yz^+?$k%Mo+#uJX| zMydvv7~Ka0^cedZhs(oVH@$_KFk!pb>I)lE_n_1>-l|=t|W&L+yLtT#n82a{I5!B7!=2 z%Hy4Hx*rMtwpWl}C(zo#^byAoOTN76RaQ0nb6PM|vwXsES*~cZr8C-DSZS6q$G4Mj z9rirpMxx6vGgs`q#V3QC5h2Tz;KQn0#m~+!Iw=-SM@hpj?8%66+i;H~nX7k-RC5D+ zbA5vKbkxK_uO-_bNZ0BRbwRAO$XH_&yO z`_Jz>*tJ{A3PuiSq%yNVTIU!%e-iQJ{@^Zq_t6j9?h)zf9_)$2KOLWmMC8p`^Z4;D zZmwBG!${BT(_h0gF5wr_5i1gU-G|0@H~ktyS{6B?gaP6k!UC>4qyWK>u5d+o2|o2h zW9vBn@K-4{ERM1%1(0v1QHq@TzjlDX82qiyfhrM#N=@P@=a&xn1^oS7V3}J3rj_Ct zpR&Dw2j;Qf6=F?*c@fXEoezln6Q%&Cd@Pz?crg_9(X`3(6P%iI5>&T196HeeK5f979uQrLB3HFiZxNSdQB&E2x*kQR^(*n)QqSp*q)FI_##{~XT0^HT0 zU`;L#3&R8r9bhvutN^QimxBCJ2Zt)g?9T{)p$2&t>4Y#7a z(e?UsZ*F5EPCtR!Vw1E&33`tG2hCU%G2%3d8=;+3UWB17dX^BpK?&94MKGh5^4PgU$ z-TB;5RWrXr_>d(fcL+BFLfK2`>Agi1Rne&BSrt~ZSCx%yQU&4XxUP1Rszit6SFD28 zs6OhH^wBQIF^qg?{5dW4TqUc*&~DNF4V@sE_x@-|p<9t;_HT@CvX$1ySMWp(ez8=a zi-dbo8L~?6out#XNNB`zuFclf61&(L!w-6@MnzC%_RjL#g__WMa(x%{Vb_}7EM_5Z z2I9V=-nD#Zcx$$=xcHKFf&Wj<*Oq;S;Hq!$7@*H|BIMpNC#Ynr=t`%^AI6kSsXhC$ zQ-m)Mnd)z`pE}sNA1GZ;DEsXzK8Enf)4i) z*hLZa^*wF{f7ckr^=G!}FA8U^3*G#>#@_R!>u9ocnXxMl0ZXx!eU7FZJMiyLX?z4N z7PlNq26_#xi_H4Dh#!_6Gnk>7OlJmIvtXO0m+7@4Lteam2UFhE&q`Lb2=PAr#a?q@ zE>i{6zf!AoVRtG+6juQH%m6$f7){T~@s#UhUBn-$VGyvOf%#Fck$qcVq>J$?7dDSy zJq3*VCqP4ObEjfu|E}m{h!B3uXD`KPkqBi~9<{{aU`>nng{Zc8VcYOP21|zVTv~lw2oBf4# zqv8g?!YiS;gFM@ZP4UXUp5;4-`&-Lh>=gnp~+Woxh 
zcTR~duzFr1x+wS4J}IPA>)9V6CxsQ%q^ECer-GihT$N(Gehtiew3aP3D0ch9?2SRy zcG*Ucbio463sM$o8v)hceJyMhkH^`NE<7|aa?2fOmrL*rLQf#7P4Iun`tLxh|Nng) zkCc&3N%oG0kbP`%WMxD~2qk-EWk*KI&WMm%QTCo8dqlEFcJ??pw(srqtX}Wm=l4hd zq&Oar`?zk`?YgepG03j!g&1>dck2}b$=(GlbBDh4KHApy50$L^gm)SHg%5Lh94TXG zd%8kY2-m}%?act6Jb$0^Ey(v zP@clO&z7w}tH_+0&GEka<;Mt*M42ZX6rp;BtcQafGzxExOXC`lh76*f>|G99U*GB`)A@-(+FBD^`MmxCPP zok{4m?b%WAW3%sl>!yVKvy+9}535v)l~1pKlzCG6L0@41&B;*_@~nvI(K`}D0mp@I zMDv*|BX_J=4>fVKD^D6M5@9UwX0`%vlMW49)UrYD@yK40(*?V^D~fNT?U^T!zO)fB z-X;lgS^ARp=&17U>1C&PtdsG$m2-@}=W4&JRtUWyN>{NPVd9{Zz5{FfnYqclWL0Zp8bD+>uMmATO z1B`lsJ9^dU;So!OKLM{xtkKaowsky}m^pSZp*UDy&aK<@U%YqWi* zpm~E>9V~roM&q{UJRxz3PObe4&yT-a0D2nyH5V0E`MIOPB#PdtY+Pm83PHVtqYLp{ zo3!@w)dze!as}fNl^ms>BOhuUbCf+=gNs~J=Uds5MJ(H{d}NnDde4#7K1SEB@bv=Q zk-2WJ8qQ||?3VV_(a3OzEpYh7K*!`w;;Qe(Zj#Y<#MU>NIRkcbYrxBL0bWpWvFGNz zVNUt8(!b%QEDAt&BhwGvP+GNx^uFuzJ`*;ncfJJuWWp?S)jj+x{RB8Sc?U20^G1xz z7Mje^tZl837$k75AgxY%H+tzq*d~MP0%S@0g8aE-1!I+LjvhZbKht0%^du+bv6kT_ z;k_x8%M#1b_Y495O8(Tw2)5*zXciZdlKS8>_F=b@C<%J}wVTAkLpdG_91=TXu@M`v zs3nBe7G~ep=u|F6kvU9t=&*3x5RrQoQn1y2Z+*SmdAJ}tv+$C_`(^}6V7|)iz9rqz zrh}LS3LjO2HspPA>bS#EKd*qONv^&!zks!hPBnMvT>6NMYLM@2yt8sYdh8tdrN51toiY|fp>mv9`rAUuki{q<2bNtAoY;OO$3 zi}K|^l>_Y<@*kJey7Wg>8eTEq7_?^=_6;rMi!yV-J@r&M$8r@uB}E$*K;VoR_cAE z_;6;3#72xOVQwW{nt!uZrg}BPFVz3=5y#p-yL_rwbEFtMgQ(j;YeN$A*zDdwR+Wgh zmUpR60ad5f=4ac)N*NoZ*INW%b$)AZC=*}UD1z*lY$FSe_wm*l4YOimHQrF!67TJq zn?uzOwM>hs=q~mF5t6-T^xNothH<);tCA|aG?9jD(aMiDyVrC)?+^Vk(#;00o`B#ox)hSupnTg_J4lR0C>Z#hJv zM&GbaYS)gdY(649q;90amw39su(|c?1N-KAnwT|Rc2QWJlzo?>pk-pZvj{Ff26>n#=t3uIv%!zkgjcC_-RyUuu>XpB|)G0PB!$%6PrmHAZwAg9jr-mBoSvMpoLTI1G#N z2CCtC!PXMLdhyXpfF%0n75U_BSuGurr>|rl8AfBP7;Nj>j+zId?nVg`l+TKN+D;Ep z>DtvC7;o<5E1Z1nu~{59Z=QT58>il1FO|j-A24ufC?>137H>jx_b9$&TMn}-iUktt!qdYqXN&9C)kJ3i-|A=kZC5!UIM*?dQgD224=fR!mg=5gS9gU ziLLBl;}?irve0I}lPZfp=|aolq298`iz8lGy%Q1bgy%3*lk2o&8vc&mLgXuvV9Qi& z(vFUE%d>~Q_VsAqoCUw$G9{iU6S_Ss)6uLVYv$J6+Htd?JCW<8BeDwLU2+y$@B?6Y 
zW7k{pPpuD06f9;ttW{j`+B?UR!$MD?mzPC2o*p{m;xZ=(`;-W{{doHQtSMLe$1EN4 z@u_+@ns=jB(wqw?Eg2QYER#nIqByoa=>67CYQC1L&}(dd)9XJw+dPEsg$flMIwspv zb2Q_2yX|O|a{spx&b`&2L+>0&iwK93+KLjB4KAS4Nq(`o(c*@d$<`nE-JNi&ZMD>R zl&~GeA*1<5^rdepa(5uBOMsXYW$@TsTz;I;?D=x8r&Z9AtsZ~P6uB*{M`iwma46%I znFHs}bcf@F^q#zY_Pbc2SNh+03-?^syholi&E`&c^8?*n^*3po_F1v}fo3YZXY^d! zJdbK_u6#@4ercUFHjCuh|LI3^iQHg?9QzV}ZnhNWAoIOPdhzDt@YW1}%4tvx7=L|P z^?kwvrJOUcaOQ=ix|~nvHiTo-aKu7eyiL!x`k_tVGycuj8iEPGjy(+gthv?OxbNlF zh0l9PyxZ)7^3NIo|#ipBW|}{ITVsDMoIpaud4 z5+0Q^{B-17-L*UCcn`US%S$ScVl8QU3%$1G^oR776XEWGU=&{L5Wn^UNF4B_E(PB>dP;kL8=?r5`} zdv)4W zH@%#fOK9{U_XtITmD))~sK;cPsK3NPtAM7W;tz{?qD?MN&>*Gq`Kjh9n@>pt_Ue}% zpZC>xBbV;JkUV6*1CceHhBZ|`w_cM5_=9Y5-j8|Go{LpItO7Yft^mqV5tQfzaKFgK zlAXJM&<{SN&x>(ji9@6xe%9A+^X0(8Go~}nH~#8>q=?)dRG&(8?8BQlv4>#G>S5Ba zd%T&kS!-99Nb4V_v9$%acv?0gWCTv8@5n49MvJjIKID_#87rNs%#moIx*q%PTr9Qp z;qvpBk^K=UcCM-tnaAW2Do;wYj*cB@IHUPBa;7({>e_l@D><_WogLDWotm(Fjuje) zVIKtd1#YqD?&=2acKr;GP^NoEiG93pnah8d`RkbLt}CC{x};W>QB26D|1|Bl5(mvo zJf)M8>-{0+1r7t^!YAFgrX_EbJ5l%rv&G;_?E4Tt*)Mw9?bRA3{`QH}FpIp{BUDi3 zQpX#S*H&&zdsl{Ed|IP4QVDN0OA=r}SOtwz5)U2KkGZrg@pE%h^{$0II^t4vIb`FY z-DF5D@%%v`w@Qn`?3s^RC%L9IGVJbrc68r%y=oO)9SSLwCA?A29K})ufjOU<8M+Sd z@@A=+v-V9J9~+&KYe;nRsI#xXJSRl>GrWF(aF%qlrP#e>Zxt;eT0X4Z@<9H~xrYIQ z7UK{5<3_?`Z3Q1y*t1LCu6aMbzA01L+s9v{>G)_niLx<A6?_ z&73%9uMgvSFE+n%Iv{Rb;F}c21@DB5$Vn>f zwj@ccc=k&BjQYxkMpsEHI6mP;U|A@TU;*_p zp12gmLgj1u_C4t;g>lrKSGi;X&-^)_pKp^8qVPCaDc$(-BfS6IUMTh#(4B@v*Zkk7 z0g1n|UWXg<5NwtqqJ0oY>(#(C`Qe$-(2D5!*-!Z?EZBM8WP$pGwa@r*OL+>}3dYP$pD_^4*uzzCdEM$uoGG z;$h7koF%+SN@^P8pM29i($0|%KVZ1NCrzFRKa3^l&#xls%IuS72|d1a2$W`DQINiO z8R@#O%!(*qvDbZxBh$ImDL4@wYw(nlydv9E^FOqTe1`XAd9dgG!sLwVQ_+UM-BI7cC9_-F-2|M()+N(w`uLOp*snV3ZsX{AO6bWpuSxg2> zf*?S76@|2ytY zWMty8I^iJ4FuC0n8#NExFHjsM@t8EF7%{z}=iELzKqn-WLveQIuA%yskOpapj_-7# ziZ~7EkO1!)sN-7CaaS>FWGY37u9mT+h?Y-D2oOCRq&o_q#@+z*;SA z{TiA!24AO0k9x;EnOnIkLeV1uUVI$tz}i>W?^pD-d`lK{*9uC~_#ezXkJbCb&O6U) zh&L)yWyJvpkCN+7iVwbzklSW@)y}XbLBww|N1F(1$IJeB(rpz^nOch#Icb^YVAN5L 
z!&bx%y)ShCuR~Xv zL|jX7vs`E~Oq~w8%p$FP%?3{rJ6z81@Yf8653(EO+Fe_O5lO?c^%{msZXwto5lIB^ z)TOUua+_#4xCrBoEO(0Mz0poPqpt+VFVA3G-ek|;{#qzeQsqv2T(hIq|H`+tX3#`7?rCNJn81MR&R6a7 z!NpB}c0EJHwfmp6$dHtH`ek-fK^;@Io}KPBr&k!7-8wV(p`>7wC0N&cO?nnzb^pU9 zehZxQ_$2R??+bl-UdW>*&^lad%g{B}SHp=}8W>t~vxOxL1eT5+V$UMiwXX+(fnO0M z)3Y1R3{*+yCtRnA?(_ftr~3RjZ*H11gE5KsV?1~5;U+`|kzWF%L6pMxkE|Z79Wgs< zn5q=p7sSofxz|ne#%TOykIC&nz+&UaSuYFln3oFk_zcR`a5x*!5}g+IM`C?xLgYrM zYo8;;N~D4iNG6zwRtsgYtoXrlp%2%Ko8(D&rySI0yWWlFS1SKh?+qwCLlz zb4s17iq~`uftFUXxL`Q&w127QItWq=AUd?YZ8(2$3DIm7hS?}`4_PwBpH8(DgQz+R z^y^JQ*@9Pt+SqbaE4^(-S3C-nuJUxtTp)I@q2*k@vQI+oQ%z1ac*iw;|;-aOYBr`F66FTQAPkEnfj+5n0 z)?&1v-gVpB9cx}0E}URVn!t)aW7cG~f!ewM^O)Yjdv4isZfh{#q$inAT%&~EV7fDG zCPl1E_}~qUO34a4CLHVza0-2A?KR@Ic5G4&t3ht}kTLBI=~??O7Zz3o0hjM3OCQS1 zlR1%JJ%2z%kkXq={J-}%=JFXO^YJ$X>-^#VQts_~_xKzjfn?qYSX(4gLu4jv>8L~d zd9-d4+F(Zv9a=RCU4E+Y)$ZiyzTrLI${+igm5@na2qdK zTPYKJw5GS>S@fw{TxK)ba;!UgeASJ!+kSmoyx%j|YH6Bkn1D=C|4Rnu*5CK8N8og14pU%WFs!idj%0K8-*x) zZ&s6$HM0w;ty_Fy@}W2_)c^UuUic0NmpG}+iqB(VhiwNFI{YS?)9N1Z*wcnI?GgkX zIwnkF@QB&Y0?7~qNtuRf8EkB0994AFRi)EAOarJY!Kokj$44yPJ2cKSW?Fg{FNv>S z@0JmqsSzX}E)0{mP$*1*|-iaJXskoMl>*P`A#G3&zKVmMH+ z(bOP8L#-9xQdoO|VnjkPAjx%eJaUaEVkdk$r2YG-<)CuCdx9bIu^u>JH}y@%6R(aA z><)WOUE1&V&0#)eI5AlD;AQ)#NAy=yvGHuzw z!%q-|i!H=K>iMEjz3e*D;;4n46`&32VZYYZou zZNJR68GnpwZ&^Xau~y-}4Toy?K=eEB(F8qQ7sab1?ke5r)p<63d!M_dVaepN?O=lPpJPf8h@5$Uj6I_3&@64L1WS44Ydu>De_mBN!H))luzln1?-2Km#jbJpc_ zcU{*$1l75>ZR|OUR;|Z3+kfGE_I8P}P3=Y@u~0|aQNsh=*HUO6@$4%34{O~LCpVMt zkE15I>LH=C$-~x%&&2VhcBNzlTWdlosC)j$V~WHt8VbAFz9l8^04M|#U~L+7CklTb zy%@|MA-hg{!L9I4c-7!fkp!l(@P$Y_Lgvd5;vjUVPZ zBsk&E-+zFSqPT~v(!YH-h zkDv8>i+1QdYrz8I#>eLuupG)+m`KWf{25$vjR7CLDzY5tnkoC?Mt~!A?av1z?ZT71 zL(ypu5Ev(sB8l>D%Z*J@YMX_Rzer3>P)f@GaXpe`L994P-@uuD$SIEY4@(dJUh^BM zhc}WjipfGs1d;a>s3Z?$WoCax?MnAdUilBJv0CG_oUVZYzw(951f?Po;>$pp5fD&r zri(+FEvJla9E;~(yeoC8_!)!Ot`tO7KS>(3Sf`HK~*SbRiGEdn|z- zb;NQ>=seFl0haN3pQWI%XEO_|Y|5|~r_-K(()jmr1##$~{$i4;lWBzMsf^>pFv;#e 
zbWiDfhxrtKGLUf85c!HpC^!3YyFQTI>67BW|4fP#PRFrd4XyA1*ZBZnO~Nl1d$eOc5AJ>;Imql&s0&~4i%c`K18Y+>L}vyPO)6iChoQA?z#BGPa9@f0?_>Hf zlTu(4UAjrCJ`-WE5~F9MmiMQ^qVx@tWOUzImQ&VZnPf4pD2n>`0}f%J+#ZE0f=N|; zFI_3_#+UH58E=v%z;Emj=@|<|40626O<7aS0b!wy>kGp_;@|tiQHi+(GeEYA#UElJ z1?Eqp2t$ZR^I7~R{qI|sF_YdVr#Iv8Gpqk%m)Xb^c3Jv=fRLPw@pT)Z8s(?_cY9wp zQum4fUcuov$u4VThLA1`xUL&v7*bVI%}~gRB%v$*`!1w^f?PRZ8Vgdt4OyqfH}6JG>%;V+TmXJt&MR%=g=D>1!yUXv!mFFnkpp#90wDfCZ3 z{_g|vFx{zs>eL#~m!+Alb?tI-5>yy1z*1PzPLK~@JeMFYOZ@TQ)e!Nb&W}X=agw>c z-4$5U>BKSze}zhoq_8RjNr`ec#h<=#S8 zGCUX3Q~xftdn~8$Rr`dgDLUAv&%>G z75NQ%Km5P?TMkqje}GHZ6y|bC9}5)w1NkNcEyoye&9_5V&?^~P#!OZZ#`osb|CI#3 zNGZ645BUf5Sw5&|@Zx-dEkyC;7X%4KL2AbD%(Ehf;j(`}_$oF`%XkgRm2rKKZ`b3# z%#49E8gsyu>EhiZr+?k+Fx$3gr8WC2w?E{u=-(w+cUL+Ej6a+6$=+}JK?ZBa+Qu~P z$HV*?lWPx~N6>!_){@@`mG%HUUsZ`CO-w?-Yp%Pz2^&IkIL2T@rJ}vs(x463!~e4@ z?SNN37qJ|^;VV}U9k}t3Fn1>A9rCvDcd9_JDJ2M8dsmUN^83ifJYy!ApT4;CHx_L$ z_!|P=SxvVsZvFj~k{-;G6@{=K+rpV-;ILOsDYp4nIO&t#D|#Gp6NWe*u2Y=HuwkHo zQob`L3KJJ1l7JzJ4nBUmkP++u%Bpx>(&x!(_HkP9b`TM0bOvtvV(z3HT>O_6}nGDHU5D2|LhA!JJ4ygGk6~|K?Ha; zB?!=Y2o;+Ds+Dj+H6xC%R6vZW>wmwIJiH>?2?L1pxC$B%o;~0Z18bhTfwZo9 zaUho)zL!0mghQ^9DUiAWVlErl=^l5Wg(;q}FIx{pFbPG*-9HbH^)mKh6P?)8J|Xh6 zm+-q!Im~xm1J^PynLGXi!BIQE)_6V0rSX4kF!F~+yn`bhDhaa*lwK6C)f}imF3N?? 
z{bK2VPgI=L$GXQ2i=GFFhUjEnaXRQQ+T) z1ojkN4hB8&BeKEJfkzE*5YriovBV!$uF*>1{XKYKc3>nv1$#>zj|sL#Pys0=%v~vr zzN*8UB2pCoZ#93$^XGz-Q9hAE`;)HIk3d~WnQR3EE7+@!K9+tW`u9mTo=bxF?jihP z@4O8oai+7WKWYDc(0H8KaFJ4~d{X-NH>x--3b$pcNV4Go)cN*yx$i^N&?rp$Ob4^D z*0}5RtCXcaf+-ST2n%Tgjx(fr78=l|Foy(-e`lq1qAf~Q`~$0p7z4 z18xA!kY_mow8d}`84J#jG&oQXECjNx|DA{%p{KCUEDhv>S~U|HOwKk{EoO}&_kJ%4 zyffH1Unf~VMXbM*)L04<#1VP&%ZT*}#&iUn%4=;yPyX>?f|4n~7|lpT^Og$)xlOLY zRX0~V@7^|yDxpWax7jD2>=ajb)nqZOI#!i7FI$di9~{GyJ0v&G%e zyF_1Uz>e6h5vpnfEs`?#7)OT6)rzGK*JU12)o6tX=I5ldO9G+pe74Qibd|sp5)s=} z&b_fb)TM{>hYvL`!2BFAtm7@uRgo3T;{y?uGdrWe!P)lv9z5+HSX~lZLTe@2vnUfM zB{|u5^*0N5qj#PjB%@YMif}Vaa#q3)M7jrxWYKY#I(*)U?~RS?Vce)Wbm-`;JHSma~y}lVeREY3m zLA%48H9~gTS`6*vMsMz0`YK@=1>JVx|JyrvL1|Q@h4!?cD#gljg1Emc1jG; z-k38I&k{%aR=uNb!`x%T%mTLWNXqOWML{y%R}5_2(u*7A0JvuN+|f7IU#$;&|9 zaAeH8uT@i}J6^GD#fn`xTm8+Mhkn_*yGiC@P`Gqux75_4eKJKQ&OM zV2*|mj$xW|E8OXqTd%D1#)GtNn~hlzW2~tOKT)jGV!=xL^9o5%-E9?+h65E#8&=;7 zi3F*aK8f3U7Od#OzvIWrr&u9v?r@;24lB8M#c29)9En-6^Uu7qqa;iLnJo=7ww1%TJheIvQ&0?xoQcK0_N0 zQp32f0kQ5~h$Zw7+qN6E9X$Kh}^=ZPETtk%RyAdV819To?ZD8qymT8>*6=s zC7MGl_uloUgTpbG2_i0S3r5dxyuK{oAle*NtqDU`rbBoj3)_c_g~Q|Jg=;g=Qof5{ z=R?}}7Z**2NM1(n0=e0iU|VCpT0D@W-4;Q%d#IKDZ$AYS`C?iGkga3dyJT)r z)F@CXLjQYHl5=B+ufqvQuGx+ky7>_F=i~gOWGqV~rMaYEV_{5+uJ|-rdDR;=LP?Qo!jI8@4i1pE)Ii_`DXoT!isy5rM z$ZgC0D}az8c!=#j4PMCcKwVXoefPg689v^+N2|C<*YI-tJ}=uO%L4%ldj>d2AEx5} zX7PpLlAV@C=+gc7U*6qfggyc$M-O^fp&r{mw7s-z+)$6q$E(Wig`|6v5}rd>WbeKK zHz5PgxZ6BkT9X=lpA@8PDlt;tr_UNX?Db|o!xaB+5F9)ZB-HzVXC{!uV0WPLi12G+ zMvCOeAV=!N8mMwVaPiyAshsQ zDeR{6ekT*}0&lg#JL%{Cqw7fV+Uev>)XqE0xT~}}r5RlZ6kSwH=&)5{9%)}F+QERz)NA@b`&j1f@Au)sco zXsim^b!5@fZ~UWwA7}R%`aW4X5YrbP6V@8;!m|IC)>5{M<-P&jx!EHRtgiv*sVNmr zi+YhNm9l3Hp_*62B#z52<1=h>_7eQ5#p+M_0*^Y5o3FpzDRM99yRiQ-P;#ijA5*n9Bn!HNz1)+sDJha}@#EutwL~L4%CH+d z&WKk9eZG4i{d{OYx5jcOw<@lD3I&||25%NHr2*q@vCe+1cw`ERQ~4{eMlIZ~kIPr~ z7I6<^a3a2uG%XT^4KCVEOEJDE%`fA5SQXN0T@Y=Vf_5jf(;ApiSE~d_DSA+C*Dp(d3jt}>a%Jo45|NHW}gTs8m z7rLVD5D*IBc96O+)LrHj`3`_nfhXjdiBSi7-VqB&lIpW)SS}c%SEgMt@u9Gh^ 
zg_@i$TDy`rZ8fy{$>_RgpWYnVA(0-Z!DrB;OU$|!QWTri#Dmlg5qD+zW|Hk^Km6D- z$U`r=^4em6DH>M;`Lt=(yyy!(wVY-tNBM0DtG>eA@`vMBTWg$_v~uHyx3PRzi3$f7 zQwY0-?pWH|xJI$$>3d-*k>mk(hmNw7CkG8&F5%J|PfT~9(!s5t9x61()#kEFrF4B8GV3yvr%^M!~bpod<=bYI1ZzObOmh3X>Hek61o8A zL!%?}KXjhYQfIpb0*8k?SK4B(B@6~;Pb)Ix-FrsKcH35MLIEd$Ra7Z;@RP~&Qly*D zBw|2n)BR;hU|&fn(RhEjwO_^K25njK(DQvH@vF+zKB&IuDBse@HncZ$(gFpWaocpw z&bGNb<{@*=O=c$8L_#Ik4qhGQZwcJrk-Lo**vyahAuWc-@G{`-NSF|E>rQuZ_V0bo zdKH^fJw3d1!fOPPH+A0f6&lGr`O{zC4SaYr(1%qW?6O$ecvfebTg?vfWDzi%2`DIT zJj;wfWl=B|R^Nb4+2mPA)vXS`ZX5(_w3$j0f)j`s{IbwE0*6`*sRDPMQB_~=>wk!O zGT_uqFAQ&)&CadiRe208Tw_cb+j!%p$k|(rk8q;>2hNl8;4xD0I?gHW{Yne-?$VEN|d?iQT+{U`6oo#Sl0S|LiMZ&~ADS8)>Wk}1q zGYU8@YP!KjG=(Ei{r#PH|4*@4=SimS`z3N+R)|)@YxF(o*TDOZo%~JZyr6&HP^<3$ z@RyA8r^Al%m&eM~FL!t_Ah8n8OnMnx_9DgkASDv>l^4&)^Yr70bMYRP!_@t>L<+IUuEc<=!q zMO3}QbUZOCYeks=hdOBAy;PvN`Q+EPJ6hRJ|5osWSEnF^1HC5b-hkn7muf8L{~QUF zST9epBCs4VL_OtqkNC*-ibo(?Mf$-Z(dIJYnN=WzFHSRz0k1%WGEN$L?LCYa_pI2+ z>3l0%nxj|RHKX1R=4n=@|X zJ)n}xfS_vo)z7afuieW_NfdQaDY2cPpBFLa`u0grkQ#)!%Oj=S>Dv|$|13l>FR<7R z+;@q;c@?sz!@;@a@9t9U{r9UrQjAc%vY*d^-+{57-WP7jw(qJE*8u$SMtER!h?r9O zm`Tz`BL~8_3IKULIE@DXASyS{;PqwFU!&!PGi|RG38=1L1x0f&E|n8MG?I^ zMuEDK*4*S8#iqzk!LAez1l|E{p4v}4uh`su##JDpZCUHAW z?v;|tZlhBD2|yJGsOQREHy~3N{_ieg{`&NqQ*`ixu7kYP@;7cY!>sYs?sCb`!JlXe zF3|{Bg~R9pbb&-$d6-+xyuOFNj7rOd*TG5wo3dWgoZD5+fp4LepF!}{3bj2>L5M_0 z&CbqvvL#|E$dX#~4kJ!(_0Dkn@%v_I#CXz$E$HjFeM^RdRqr#Df$;Yd;U>N@Y1dqa zmO#W_)a`BPy^5;RQq%`jCzCP(baiuNaO~zl`eE5IVSbBgY z&_pwEPVia{-iR_#{R3MWdCBU`F`cAM(67qRothf7OqzyI$i)>$s;v8#tJ5V!!zgF^W;%hYQIo=S zzMeQ3EGRfHGZ?#=KVNpSZ$XLH?)pB&O{9ME;)`Iy<-%4e))9?EH{0e=P+4N5f```r;q*4E~tRZpTDxqYs zwLZ?tDfAcA7s8;!D|Rj>YpWFK*+62tiVm_bH2t(D%g*aOs7jdf+e-c6h(x|o=y-@I|eJ9KENyq+xa! 
zYqxuw5bYGJ2rZQIT>IGk0e4Ze+?O91NL_8Fevr@i(R>#c%(p!-I+-RtMYh;!CEU#m z@(Zh&C)>_MB+TPT(-AGDqp3@Zi~G7S`)pecY=tz4b#5dw85a!hD-6ZWeZ4P;GQ!t4 zw3e&ex59l*`7a};ye zgzu6XokSZEvQ1JVM}C;!US!litG2enXRxZTWY7^9R~g&udGEBtVw z29I<$uu*_!w;O>{+I7}+zhL{D!o_%WgLaLOyU=$+0xdKP?O>(oi1l=vx}eXJTKfN$ z7XFw%>!|xABgiKz{FhZCA15ed1|HJzQ_@~M&7Zmr*!3#(p74tZG>F*}pi85Lxw8(K2inn-qI?%F5CTrR{}vw-i4(q;3!_D=_T~~WI)!*B z;iR286pH8w$h~G-BAVh~Ruv-}&RRgIYYYSvQ`a*AkL;_JHv1P8VI-;_43H7q(A@#L zy9*ZIyZa4F4pNGsGIGk`$A;(2GP7l(xP^k1mjUe<>z4XZ7gUmWbp! zv)Sga6^0|YP8vP#u^yy!;8^`(Ax-C8I;iipU;EpF`CZAdT5bHJhO)>+8Ye>fMh2F) zTAq=VhL*yv4^!VW?Q>Yn_~AWb7z(K+eA0qH!Dxjse+yc!Eixoxrlz}5&p-v!Mk))f zn2^%RNW&;(Bv^vjgPkZDB*_(CX3YVa%VtIFywLkqsOfNbI!qmC^xenmON8WD>BGCn zRZE{M(_vPtRDb4I7BXMx+-gBv{y~iiB$cvB9IneqHb^!lO|aovR25SsctVjWT_h3l z67a`utA_6z7(jxxCtL~9KD_Fc9~`>x$_d*F|E$Rl}&h~7~L^^=oggF6EwoJwG90;2jLN5-)p7bo`u z0hmrEq}QQlRDhP$`L>U_x9>x9Hxe{t?Qk;^Vs!+D*3#Hk^;jNY8kSFB2K->l2i@Yu zy9*|lFS$Wgf2J6FbD_Zeu{F^0>;#YpU1gB42XLZOh_ks&#(IXEp2yff{LEFr-R@O@x+TQ+$sib809IUB^-g9b*D+kx1d^o?H`6Eu*pZnAU-Nf*Y$?K-P>pT_J5@VKGv#^!fA-8{_wDGiV8~q z8{X?*a8Ij~6tH?g<4#H3HAxgZA6Uw`1B*VLf5$KYccnVw4^QtrGPRY#?6Xv`ym^0! 
z?s_Z3HS=;xv4mEH8T@n6nX|Xse#$f85wNpO`aR>Wu#*m7f4AUv*jLZ@9iCVQC;~RV zOUC`UEjj)5C6)3wk`L0JP%JzUuvf?ztv%XFfL?7Ush8{%xyWlE2n9hQ7er`O<%~dt zIWG-jd>L?d6m(Hr0hM>i-ATXLJ0nZ#wb{zS9>`Q;{FNZ889KrtO~LgEkzXKZx$`WG z#FQT0UoAjSJiHD=WPm$lsln{lpy0twl0jv`F;f^sC`)<`N}zKJdiI~v;Ri^cv~0uy zbSb+)vcE4mQfkg*0JtH)JZgau1g?6?pq8|AZ2{r%H_tu-=|CuclDQFUU`4u6@uE00 zNGb)2k~#OdU)x5^lY60a2PQzpM{*n11X0)4(jCEh;2%vRbY-39({J`rQkta>Er8Bu z@SH(j`M70Kr7EAtilA%!;0n!M_TdL0#kODNn-No)N3ODi?6YgO_z#;8H^fCU#=O83 zhTiO3>UrwMiRb7vTv;S3J$hUY+AaCBTP)GHGHl)tTE0Kr)9&+JSR5FM*PopDtbb)e zgEh94p-hjS^L|I7UYS$ulcZh0oDugsFXmd98Gh8NuCAbOB|6rl58U#rc->;IF7)b( zDDN6vu${Oj_z*c?boc(^h1$j?k8;-x1mB|y{Uw~uhl)|RSFh zCCsDP_4V?}LHgCj(Kda@x%%f<@-B{TPPX4LEOt-UTohB<=l{qu@oDI?qkz@xtHk5n z6Ylfp+whHczS>@SI9B85aM+=v;Az6k87Wo>*6>loxGDo&K|iS`jx&DkRX;mkMXoD6 zC}Hskfg3#nASOQ9cT)TaV<6%#2sp0y``?on{{Su1`VDB=65v#e8jpsaYc&!Wy>Rs& zDW}Y90FIbaEY<5GU4c6=vUB`o--VcrV2Sgn^(D!fk~nOEb0Hi&COG_$66xN*M5wi7 zm|z;-Dp%&uX{e;?R%uf zh?z$^9d`*2KbcMO0;Ubagu>)8{OK|>62R$%Bus3(=9@N%0d2E*%i>R^bv^jih>tyQ zKXQ3|omwOF29*cuAi2P3Nrk$`t{h?b>Dgh$!>qom>UC8T_B)AItH)J|jyu`l$W=Yw zLmQeeBI6us$!Qf(X4%nEN!x4NG&YL$g7%B8V04yA$$E_?!5u+_OWYb6tK0sG*go`V zhO`b$nNcrK4zDC6I&Nhf*NkpXJBHC1AA`4!*YwL;CP}T;_;u^ms{1>%5=?u1X3^q~ z7h2!$p<1E^!dmP06qP*hXC1MgNp!r%We_=;`^1r+Gm0aSy1UcXiudinw4T5APQq02u_=o{52 zQ+nPWiHhbmn--Ar+YMOCeaSxgWg|-XobrhG>JayIOfH!rtH-2Oe8!y;C3SJz>(s?F zBszs?pR3D<(a|l`=ks>AdnMAhe|>0Dn+{EIRBY1Z>`ctdgdl-2+r+B-ag++Ki7kT~ zuQS3KtMO7b_f7N1&T@t{VD+kl0OFiQRr)na z;CShXiGtxD>p)?&IB*?Dh;NM`HU(b8ckouPD*(DEJ0^H$an1vl;MWD7W z+u(l5VE_)|7$}>ViK}}Qr)jYISy>TW6e5Bfubf@`!zjon{0?_;Wa?$c(|$sBW`%5g zmtQmwDL25i2 ztt=2%;y}*Jg?Y-QdW3VoA}3d5@xgC2seSg8gIaqoJ9V%3k{x@k#b+_DyjN=-|FwG-V|0>6mvEKZBy19!u z2-(9_Dm{e_+ILQ8CrYdn%vpH2te)yg*l|J^M}vHPf+W@;)(%gyMkdn>_*?brr*`84 z+Ln}{Uc1>-FMEc9Zdrgig^4fxcB$Ib&LO7p+D6(#6rB7`P8M@czG|o=5*#&3rm3ag9tEQ9*7R_c*{5j2jQ)_FFA zRfMnKJw6qGD>rx-1`9m{`)z;_KW5D+KNx{ z)~g%Q6kN2o>kpE=)$2@Q;y9xe#*p{_k@c2QQLgX%H{A^iNDTv$($Yu{%mC7uAV`Tw 
zmvkcxNDL()rGS)#C@m!*9fEYn&`5Va7w)~k|KD2AtG$+MZH4>3?(;h9_#8UK7C+xp z51iDw4ix%oT8(k>&>EVStMnD47~-A*AP!Z{%+d4G)15@6f$|ZR)sxO5-EvcvhJuvJ zl`k$Q&z1Jef7)sAHGADW`dnxf??TadLdV$-8Z+!^3Y@bDCFQ2Q!XvQim~!UpXA2JO z(pdDFZhH0q$@pfeur`!UH%a77Kq(lo+8v#*!mu0!iOAQFScJdM!OWB@miyv= zz>v$1&|gJQsOo)&c6WX2ZY^zpjLWq+>R-@>sH=GV2*YFfW>e01x@}QCS7Y6C5=rK0 zG|t+K9T}W4d?=LvTD^Wn7IKy@^6b{h$Eii0G(TybHAKGsJ$L=h&9iC}M`%Zi1m)uJ z9YVRo4i(+pQNAU?uV-zk2p@*(p3+f-`&&%i4OkcTK}KI#6z70fgr1ur2^F)``#c}( zET*3Uxee*i-x*e~QdtXe)td~sefm6!UAB0i3}llagST`SG(En(e(?L}HFajqbF+AN z_eYS9HFhq(Lqiume*Z-`8#m_Qd=yCWQmGhmyc4cRB~@mhZ!1PLf@NY8f@e_VOzPamG16PtfGF~+`N`_>Rt;E!#1Om zmR^i$42p?dqNFN>K)wFu4+i0h-`Z4=Q>uI&2hLFO>S5Mg*1pMBDOOSJ zeo@4qQZMihK7N;BAtila82cb zlCh~l;`I`NCUg2bAv2XZ=j-+_JR{sTYIGy}YM(sfdRu5IP83;~I{_y37Q@qEeW`m6 z6yHW6(%rmX3{AkvC`WYkg!AB7C-%JrbtBdj=u?g+%Go{73W6L0!;lH7t8z(yzeylU!sXlBc#m#4Qt~6d*)qy&hdli7% zR#MrD{b=@@e)lrBq)T$tYOc|Hm-h)x#~-m1(ilgx;A zc0w*Iyk`&8-X#w4QR?^}Mn3~2`PscgNa(wr zW5vt!tICQ-{x<>(qYMC#DfvQkfDJT$=qx3yqB;uZ@YI{W_Zxh@5XkSh&%Vp`1SacO z>$}gbE+2Q4dcF*j!;^w3FOB#rQT1LtwWcdHVxG;-?hilb_-SyQf%gfo4yI?;yTVvz zxl8B-Ig7{^>s6oec^jJ7O}*2AO12_z1AJKMaeI&NB{WXx?J%rL%B2jk0zN+8R=x`% zc5g{boAyzIGm}3YRuV277O${UiQffRhU2P#FS}`uE}DZlW@Wv6?%ec!`ECW`xN=QD zuJ=e^Sh==GWN%vzK7P5U3^b+Yo8pwG!-VpC*+ zs8~?DYgfoGfbg=0M#gB{|IUeo_+eA7R)9T&$3~v3x0T4^`k0(pgp>1JGZQ<~h*e4E zNCTqEWYDrOnP5I!`Iiej*j7Qc_0mMCc}k~^q-71d`xLF|nxyH+il%IPn&^8MKVD+P z$&d)GpkE;y7a)@2?k38LaO}E(#m`o>O{HX?UaqEX_K~CJwe#i=rd{TNL;?NIokaer z#c7l05sY$C?AJTmtV!39$jo@FXdxkS)VTzP19phQI5jKTyHPOFAyvT9iBgDPIG;@( zZdB=aXKSe_=dnmSNPLoo=D#$`9E-ragU&SE<2ObSZnC^S zoRkyx>BS5U))J8XX0&;{Z11HS5RCtpm4zlQ_RViZa|N&Y$g~I{Aj~Z<{>V*K%^~TC z4?c1$<@mVn&Oe*}-ixgK81Cl~Cp5TQASl*y^@}wyLWaQFW4Qr+P;JZ)<@{ z8f(L&RNxRy1-zz4)HfDJc5WNtAVVx!-qm$zKRJmGX}99UKVT`j$MEda?K%8=r~Y?& z-98mC>aHQmsR_4i)^8Du5*nY^VaDFGeU&_PNywIFT(^a&?(5K)Y46BSmpX|IZ1Id{ z@!e5vD)FsLL-(vm-;z#Lib!-d-^eYiHmng6Nr0Ml$gWD z+0IF7t@>Hb!n`GOalm(yBu?dnV!gU1Dfs8Vmp-oC`tYkS+LF&9m7Q_3&2bR<*2T}z zK_g=1ZwL{Mi6V4z00@1RoQoKE?eYOZ?)9Sc5RBN#`M8s}n`ai 
zyC~*Rc@%wMaDf7w>s{A!nNvZ)G}I$K!5yOrEPJzE2nJiMHymFj30s7RorXBLF~CI7 zA}uAH9mE|(gc^f*Cp}@ZCBT{43jb>zaQe#O>7};Nc!n?)k+t5(R;1UB2jg}ddBqQL zM`_$aB*e&k0A(}^Gcyz0=mIjD8H=fSZjp&kVlA%LebuhlByyS!SUpz^Sz+c86 z=J7eY>(G=OAbDgNg?yT+S$q_H$KfIm2+kveE$5&m|&j-s0r`Pgk1=|bnMbB;9158hhxZkGO^LVi{aXH>u}`fVMP?w`Ls zTsNEwMkjq+FR~cum`IlZlfYCTfJh#t9(=C7XO@273HsfdrqiCM66WzCX1n>lELUU~ zjfiW}w3j@OL@#C!Z+z@!^k7W=o^RYpPWQWVdl_236gl%B!mAQ=A+#ksY8%$0cuCoRKe$LvWnWBZ4lNnTb z6CzY4?oem)rVpeX0d*Cnri;wLsRZ#X>bbLy$6=zFgP8nn3_|Y+Sz!U276t7r9{IbH z3%8}1;W$h9-i1yWqYAaul$sf=>(w{gNWL8CtNCk(FwWHbe19T+y|=Q;IdB+(kHyqR zw%r`&FIzp)riztIG9hdBvlKg%sSRg|VE$GXIjW}6v2t!3!|43vJHHOzbHiyy`AOF^ zY4|Kj>wdrv>Gda60DKPQ0+>DD&*jsy1!IvTIh~_tm-Zp zxF7yDJo0BO!d;6h-T6q&fJd1qC$@e{pra)^qg1QjksW=+D8c(gm?N&0+&q!vzzL?+ z^@jLLErW7m@yr9CDfgq_C*PjJSdO6s;g^G&$)W|Rds5+f9$&jMzIry#^e^V+>V5W3 z)DY0wWn-+~%Shw8XgExB+$FtY%TKF;WMJb~>K7+GSl^+pb!2?I_*%;LJ3%+i5qSMb1NES6U78PkOsG z`$5qkITuY?x+>*&#ZvsMzl{V0DtortH|^g#Yr6BoubSdKEc$n|beA{p>@HUNN&2YX z?z)tDO;6Z&a;e!+u_?o6^;<$ha-_oceH&}rB{9AO=&yuc?4BGyb3%{e=C4qkJ?OGT zRi4?^Lpi-|_H5edsR{d@%8QY6Ul=J>babEKFP_Ed>;`axrDc8Ygfe3E*&W?Zl$0fy zSoC*l#tdvB{&3<*9ie6H@#d>u?s+rAdpls3ff zJb-ng9rdl!5Vmfz2N#^0YM@u1La@xCDY^TJI++ZUk&DpyuGdYrvGk=@b79|~x5m8A z?f+um9@HCuQ)Olr8%ArE0Mc5AMuYdV5$l>%Wn5RiZ%5Ov{EmS})EUdL-Mb8N+eBKX zlYJ8IxUPjo`?v`168&4S@pOb|<|6N612+$(XB8V=KEdNFGYE5G3!15aio1GfY`0%k z$c;F@+YzT)>|&KxvD@yMJ0p@Rbno$3RK%<4?XLw6VvVa9l1Eg%qm@!oR6fkbw%Ln) zMpaz5JxK&d1&x;YlSS}noj+EXEA{B+(%^nQ7_hO9$9E5rGskMlf#t4U5}; zTk$7$CRNU6Xwygd#n>yWwLE4TS4QE1T!bAXM8YWlNH9zziusNIkQW(ZX^{I(8pY}P z`nxn;L3NcUJH}E0RkI24C2y8lQ8(&gVBNWv3i^Bl)!FVsy~DsTc8kB6m4`OASL3*K zP#irShUa&>nj2|TEr%DTK_i00@O9<9>T$DrydgF0;7PS(JG|Y~T0&fY^l4D{CKwd$ zd)3}QVVfUI1lFCy?FCAP6rap)|6#R@X|~i011Wn)*h>x`23@*9W~cZ(v&?8E6A77L zQ6-b0k1Zh1E@94dks@RkqJs~4B+OneohThe3R9UNl_0R$4*tMNG?fw{XEq;w${G~* zfn!V3nl2Q_+NNsfHi;F9hbE4U3bR_&4znp;EG|KXGUA;_0PD5IpdkT-w&sBOlHfBLdrwKUjkR_ zkUK5McN|FCaKs+ym^J2y_T>qfMVt%Hr#6kZayQw6{2rij@J=d)8~&3T8+6UKC;ax4 
zdQLD=OUC^y$Nj02w*9E5T1|tt_Nzy4JGXGh$Q;Q|o@Ju;v==FwR~Z%EL3=4NW_zaR z9v41i=IMU&+a*tX_H*eByLVRK$mxfLVmQI_6ZZck^y4%1@f-k+g|-Vs=3& z>3V9~$bO!9#k)Fqk~?W2`7Nlq-06JlE%?DHwlp$~xjXD@HF=~ZY$jjpR%}S{@i8n4 z!(=MlKka>(wk>iL&U+}7RlVfN>Bi(dxR+q?sld6LrEqecmxIc8+8&O+z)V@}uaQJ= z_wRjebbSAEMy6x|=Zk@pv_#$SlXq4859(wCDQ3@7LS0jgxC3jLW1f+|3tqlt$%Qk% z_E32n8N7THvJ6R&`9E#5Msci-#nQxfX5LlLzkiHQRbm%s6jr&M{AL`LDrH9S3OTj> z`9b_sveHO}yKJGGRu9)EeY|^HZFkquPrmwIDO5}9;jJ#i3P+gs%Vk{Q^l`+K#!8zP zH*(W|MD1UIkph#jTLW4(L$gN8+%0QB=L~ZcuDO5lD&rNd-?43>(YlB%Y==&=vTug+wQ?#-Eu=QEG_B%$a+ca*lE{YAM~ zGW)D2d7x|i1-Xi}!Uu`%z$}a}j#sSJpP$RF8s!#D_FE^d>V_8F$cEQ>b<4b_lT%u< zpWdWc<3>k5eSmIGZnzceed!mDza>dVuq6$4yduO@wLlIUeST3Hwb0_o>3~99b(2-< zar@bEQZ>}9`U$q~QPsAArG4i6qLg|!W$~jz8&#`_jdNnQDMdboRc65&smC<#vaq|d z)1_FzXxS#zY538V2AQd~bEr@6$|_)7k~uNe95tzk7n#4t8Wo5b=ik5dKNDuvr`8JBopIXps_|6(LKo*C0lD zU`YOs4km+>BDZ9hA*&c-*%{vftJ-5x!u4-=W8^Ljma|TF4Z%+o|0>77PK8gg6z*#F zo7VksYDTgNGf=eY7KdkA>h>@y$8uB0wb2Cg22MOrlD#iM-B_MZ*8OSsVUGNp5SA0h zemsV<&AzX4?wn{*2}}8=Z>oV3i+CRo4h?Zf#~e>^VuY9fq? 
zMe0Q&YlXCILLaNnx0HF(@jHp}9VKy;EQu&{al-|eoRT{CrnWn3GdAx1*jrrtl9ZnguD zGN@HA{BIXPwxWJQD$Id%;QZtJG4!HkCbMw5pLLO=1SIxKAoSRN^mVn2yZu9t#bhqY zVr;S}SKA`I+3C)O->7>UUhk<|^`DSi&E{c@T8l~b67@(G#~SQ@iZ79^7LHCUIVp|V ze#`MN{*2j}hm#3<6_jY7AW2MatkPuA=8w5`GvRBLGe(uDmr26vL`0u`B27n0g63AS zGr^IWu%m-j$2-YntFK-74Cd(z?Xw(nfDXG0=G)(xPEJ^7HiTnN?AX+dU4;vVW_^?^9R(ftVoX1l>*Dq(1~KYz*2!mMjbt>feqrMg8uRJu77Gh*_@~m__AW z5%aamM#`V zL@l1D`cK%vYXdNd)X*2#B&0Y*ln$WVV-y)?CqDf7)x7K88CYH142ctVTDR9dkeki3pk2v7{zm; z8TFYV{&7Kkp+s2LH{h7551?a^xiOL#5i#>EG#&1tW;1+*(Mse|0^Th6reajgL)3l^ z)>u}YtX-(Y#H};=cz(#7WsjVT^3mJr*r#}Pj|O4(2GxfTBc0W=&ax05Bu$>(W%qWw zs)(Y@&!2d-v*u+u6fm)?|l;XIoDAWA+FB zuTDS_#PdAp-1rEz1MH}$hLlxA1i>PN<1^pYCjZCK!P&ig77DCsK6L${c$R9Ym0 zMV(eqy|5&Kr_lO6wRfP zMiP%8w*$i4N8SbOVmfs<6Jmu(!m^ z8WXZhc)`@W-*_$X9+1al&C>ZJeCdX9nyh9~Lzu!qV zF`6#u%gH^)N3aS4Eq|EMG}}|=bp{^tPQlmS{2yQVm(B%<2C%3)Q|_tkko7qstT3pU zTcD|wqcY@}6od|U71_NJ2zLsiV`f)ND(0B&<9dJ3kRm)P-$aO;!s})-LdnDRN~7Q7 zxYf6$eu=Y6%)tV%y;nAMax$OiFHI2>1NQfl%Z){Rj&g6Rk0 z;5!-!x*4uD?vZjjD<<+ZE5>ye^E{K2k35e^;l~dA^0~{PV`Z zW#uOlpE2l<+s8J~Dh;QrDI2fwCleuQ)CMbTvs_fnQRNQ1qVOgqN8F;q4Y>fm}(paOP6+TcL3YE^Zwh~lfW1$vQPPyImRGd_(eB;`A<@PdyT#iqfJdby8 zJfIamc+2qUn-~2XQLsGyN6WZ@(i)c=FMXh5?-5sV`=0NIN7iaI6P{0>e#h^m4WYxt zIrBfl%8iFA6MZ`nB4jbms}4j#e*s~#s=SgDG7Go<1EG}*MJ9wiMx1ff-yG8_a^)4p z%IGAfLNwBh4KI)iCSYghNbO%kVX{G>7t=SKz0TDCE=G#tfZUWSTheajBZmJ=li9G; zHpm_;CXph26iuU&YB@u`+?~!3_Q;C1^Q^1h=M{zOs4WK`ohYu;%LyrXMd1URmr4&X z)NqjzZK-UF&0w{hkO-%d#`*>~8Dx$W$`CBZXfhk`-!}pl{jES3V?d|r3F+~?7gh_D zk?;DjNq7OwmWoP=2%){lc3J{j8sra7$rZo!Z+HNa*49ll&zY)rD*heA3 ze*2)<{?QG=0XHA^^Q~{roq3LRwv#8Q)93~+wu^5s2;OgUynDo+{QWXo<4Bo*zw54> z`Awaru8dAp?Dly5imE%JPws}t>PYVu6``D22WhgOPZ(s0@@MpkIvXP~MN!Fpt)_{< z8*6#Prvx5tQ7~fVa4bDBhk503cEJ=mj?XTyUXCx|OnL1`T~fpzLzc!SY5&57%Oo6l zEQoz@KHSIUe6+}AYyTGYt*s7fj(>m$w=7$*ODQ@u{W&7^&-oI>QlZdh_ipgL$PVXZ zV4Ec)fXEYsXKlVVUWs5ozaLZSGdX-OUP#Bg;N3lJ(G!l)Zdbb!aJn*lT1aa41tCKT 
zS`jNyIWMPM+NBb}xmMp}WO`@Y^7pXw-696Cguz6t|L_nQ-2QqZFncDH1y=?nNT&uT6xCg!Q{vGi87tG9Dbhv;)MWH^1ynf`jB(@O?_Z~{Sz&Q(2ETbukbva{gOaE_p z2x&`E2@d~Y{AvPG60trd`Iv2o0Yy(-SlNUbk1b)U-U!H&VvsOn5zfSJPO{Hab9wPH_#a<|fAHmWYEHCuk*BJId>Fb?MC-%(oebFK z4i*~u6c=xXW^bGo&`V-z$@%RBbRxZ1-#E2>+>!HPT`nInSpUxR89LVk0t9YA#LAXU zpg(>xX})hAlk{HT(<4dc$j_V~$s(~Y^B{ptPWxdC@N$DFqlu{WU>4y;s-me=h&Gn- zVz3{VQ&Y)RDs}yWzlh8H2Jur7aaxbJwHtyr%1e3(?P+)oXUfh9YxvNoI+kU5KT6#% zmg4SnYkz(2uZLFt_|D%WSVlI*i_L!}2`DtEb)P6`g!8#jVn{}v`Jh8WsjKQju5JPV z33+B!_L|~_$ct{2j+V{eX!JKIJtx7;{@rrP5$N>=-u5>nA>_i(d}O|XBjxV}#rQM@ z_VPO++S!DnmiNH`@%;N>o;-gbpl#pi##K+86zrCxg%~}+g<-{;QxNvC*IG;}G_omo0E&`r zI^-TvzPn?vS77muC$SqDDmB{ti-Vo=8=RDSDHU%r()nzj$x3u5ElJ#k^4c6LkSyz-}#o107%F>UYt)J5ILJifBpw^n$ zQX5lmTz~ogkkMDsi%d4b=^TlUz99njG2p@cd3pcwXS`f~Dvt%f2EI09?ZIe*W4P9| z&P9Q$d5a`y*;Bi0?y47PPbPZL!*a56jephQ+jH9yI5k=07jn+Avdw(x5|R|h-D~;u z_K~q_;rk&qHCqOp^C#nPP8eQCoyWqmGEcmN<9AdUfQx$E2uLZ3P&$<59Mm81Uhw8AF2 zXDA6eH{*94U2p60SmoQ5kTk7w7$d|DFVOQxN#)AEhhK6QW1E7k5)rU3wn4MPnUAy- zRqDM3y0m z$64&ivov_r*^Jm^0!*9+15zY=d{(9}Veij|nb{2Qsk+02ZAuhd{|R*vntI8`y53+u zpIG~6n8UjFYs%e665J5Z+Tne6nO@d;#_AOTVnyaCVh6>SoCL>6$_B&`-_;)f1$6hT zoX4S2=jupNY?5cB1Z{AzxNu*Az@>oP0D+KZ*@Z%3GCN_`0LpF^D~+-+GP>`(0G76? zqUJH6Gf6}iB%Ebx>^5+qSLSL%Ub-8QS~bqzH3II`$-Qd>p9E$?uT`iqUQd4t8>}zy z{uf02bD`R~noKobz=3*WEDlg_Hu43xtLU$f$8#J*5McCvY>j{oQSc#`R_|49LU}Oz zr`kt)*lXJ~GMaKCJk!TmVp+ zh#E5z1Bru%D6-4!M$VlzGwaMX6YMk<#Mv=W=TKMWuZ%3U6pa#G3O#S9qqXfAH! 
z^2!;#YxXkdmreNk3-C#DDq@f+09CK_VtY&whi2*u?4EO9F_3!o9&%{Br|^}wEeBvW zD&#Sc4r}zLK<9L}K!7I*fB1jz1ZgbD9M~Mt()MV+#A5)7E4V8Bt)bU^hB3!;`(2!7 zzmthd`)e)A^)GEkHBJ3cNg|4pm?FCoU|6!dE4Z;ef+KgZaEaP$kv0+FgkaFG06EMr z;JJmxD(_Noin*t6=svkbDPK>Yz3;!w%g=LqnhK<)U2WTxB7FyF1^=U1nGGk~4fvuSoPEiK||> zLq&UUT;E~8A%fn=4ku~EH?o0PS5l*4vYZWUj>fQi13LkNhP1<{pj~kdp+s+OX(70^ zzrVIUJ>JQx8`^RG-vMu9LrFTnP)id0s1H}Jx%XfA8-Za^Zg+Z&iNc>7lmKahDo`b5 zgXd;&|4#>dt!O$7uJOE)txH;E1Bfy}YNkdUl-(13d7=Ht!!&wIyUn_VUF4-UP> zfJJ;PKJ@!hkXHnY3_&0j=lvvzWDM{jH6kPN63f9V`Ve03dGJatS@;u)`5?pEHK0!> zqO3k=&Hw!x5E`*xneq(XKWcp}TFUl?WmCvNh6oPz zFAopR#i4;#(VgWDW4#Aa1I@Egg9YSLGWBIUY|l5}GSHfL#;c~oFRgp6y5A!I-bD)p zkOVk*EEY5_00SRk+x&`Ajdn{h@3{~Y#UaV&?5;N^4R zcd^+ORTwj{U({uY+4QH34*2ihBOnK_W(Js^ z?8eJ9%>{^5(!VB<|3tB-4lqYC!D#T;SuALK36{4y8sZZB!~5}t=NSUO-U#A^t!hMe=`yWwZbl1*fQ%XEvy!8V1D<~~ zrhd+T$PRZXmPGBEhNRdBGUq7L72ei)f?S+3@@0Iz97fs?+OPOAL^mEXSJDg<03#ZM z91f*lT7fu(-C})!q23pPUzS^cpYmgvfV;J^JXk?Wlzd8=_N(}rG`T!7wEgdAZbpGf z z!#@U(nDCBLu0uXu09aq^2U+Lp(C0_HplL!3$Wc^4%cbEf_W;0--@up8pSAgEpqBUy zD%j6ok6Tw=f*Hg+s{K(16SIF33lMq(P^|v5g+Th<*?8x?(!vbKkO^}`4pPI!Udo#s zZrWNOmXGj=fJ+i5@nZ8Mb`)|_aev_vIMES!Id@`o(qr~{DqVB})oMLAlm#;bva1l4 zqN@a}TRUq_)eCgul0S3E`(>8tY9sFukzdcVOfcvOqW|-Nf<`vL7Ii`{tzkp~!CWjO z!`iPSY}dvgSba;aShewcf{YCJB|W)*;_WvNPW~vxv28>R=-SiCoe~D!3gVP_5D?rp zF5z`}CvLlV-(q>qr#!*G&7HC%Od}TvcXhxCe8z;eDfjpDG6|6foGii?E1+t$0-|}7 zVSaJ641q*=@JyYT_;uOqN%WnKu;c|vbt0<<3qVkX(+Oq9Q&lWJNq8`lT=USN)#D-2 zQ84AUA&z{!;vrL?LzBnR=CfH5qlJJyAd)0>BD2`>P{F}(-vj=U2tp^B>j+wDKa&Gk z8lCvP3#Y&b{udx@V6VgL+#$4*#cghF*J4#pY<7OJH(xpeUXLd8yNm#%nrnuMLJ7_Lds3dbU+QF(?Oq%~@yGX}$Th0tZ6+O@HJC}1xS z|Dwg(2Yd}{D$gJre3I}FWQ&N8&+q`SHLCF&XQ3a?O@J9mR&^&omUVc~IW ztN%eBGx_b!UElw1wN?s$8XknZ4G*w18W>eJ>@1?dh|vbhspPhM>@nNhNL)g{^p#6H-U%wpXBu^53N~903DmYKB@7L}krFg6AR8i&|9LQ8I+wKey~570q{ui;Z-kfe z%gsXiH65KVY6V)nW1%B?0>Lhg>`#|Tduw|ww$A|$_N z$Wp$@{*Q>SB}HA)y&S&ilTew={60L!pUn3>Y0x4$nMkl%uNloE_wF%Irr|xa3+SU7 z=+|-!3O-kW>}j-i&>SL!L-RcIF7sW37hb9tsfxlIZybMQ5~Y2`$c3HDWEd*!jzR35 
z9z%p@7DdUXGK+2Y(@Q4n%6)bZr<{rr{>;6Rl^`|*T-Y(`dnj>WRZ~%J(YCDQFAt*Y z?FM=QBbn7C6SNl;XKGRX--MA4Gg<-cpG7CXV&?k$jo7`B2c*qau*T~GfGKai^6$X6 z9{y&o4+;n+skMke{Id3X&#p?v1GqK&`*R5QiDinEC~&FAeGP}uf;&~E(V0RXhY6{N z!U15Nz0`m`1U6;fv+rUcqN)2{T>&lo>*bsS8mdd6SQf6C2dP{@e;yCdgEN~oI^^BD z)Ks2O!v6Y)TQ^R^y!Ux%>!6o|l7_jrjfT7^UPJEPcvA4}a0y`?zjJVqu<*Mw%dcb& zn>D|k-8N$%U0R{R!os4e_9{9c{vo@-+Hhl1p%yu!D($`=OTjZiI~mfXBJFR1^>cv>Pj&?ndWHt(fqV$Lw$Y0!FkOGEwv&NB9F z!86YBLmCf~WZO4yd@nL*R-#{N$!StFIn)Ry9FbcnNCbTlQaW zt_1rC=`aps>h&x5w_XG`9rAPJM=yEW$DO%tY*2 zb~LKUKmJWQ@tWjKu!+z3$4O8iSo}- zUY0IDOw}{)JZ{aGpo6Zvd`dE8xc^;t_gRAI4cij80c|p>oK$sf@?Ks}(jp%Jhb9<{ z-Dx79ia)eF7f}T~tivoBqYL-~gJiQ`o2n@T20%986m&2m?>B`mZa5nJn@4U&_(SIa zl~-BRde~+8Zc=I?dNWpqqu*Po5q_UwTbrr zcKoMoNZ7LX&XqGO{(PqL+oa&i8}j^p*IB}KGdoG)10ebcfB67BzFuFbhViv5y(++^tRXR}#k>CeT zB@O^ak$OONpG~t3$mwaHohMTS6|Mo-V7RTC-$JEJ#cWM@lE~*_AJXxmit@j+*!Tma zwQ!;!04*71v=BFG!daX8UU@zfx=ssZ!%3#iKc>)Y|2w+TN)V+G5-ZFUt zj==B!=TXek5YPbZLlmQ=qBemg)iifbu(SyoCTbhho$(9|hg7X5k^E@Z8E?GC_4BzuP{fCT8}>#a6S_Ruxe@S^H#VDr3yUi7 z*7)GEI?xXQ4;#gy0_Gq)`lawe`=es4{~IV?=GW^!IUH>G^AgHg_A=g*aTf%9V|GE7 zySvItZ}4NTcpC0T&56=@-e-!Y7s+vPzzz#?a);oO?>c-wa&PsJQ1(9FX5Fr;!KQZ`C$$=QVc`q&fkqlk)_TZ?b<{@Vrc zGV(Gq2$j|Fk3DQ#?s+nBJCw!E$b!j3BAKGdQt;mkM4<+_nS% zH+0>qH^Jkren3>Q0{gqWZYqm)53BP542-A4N%_S>Ri_s~dAq3ovf6u|C3=p>hKob8 zIJ70?0N~&;GEp-#M_V&pBSuC>phgww`gm_J|2`%1m=sP=*BM~bb^wZ~0S|emM~ib5 z+vg|F9)=l}3{jX8xlWD?^cR@@rjLM1R&PZQJOj=b^xMeH_r2j<)^m{V_v+N5)6anM`jzix{c&&&GwTZjNGUiQ?z1VL#+?`&Q2l8 z&No48L9$ylHF)dTn#l!x2@B79K6*w0rg)wE4%ESJN3o}*-Uf6{vcDxKw{{Ff4cYOM#n;Ie zO0?C!2@iaMN#v*%;O8kg$u%r=nE{!iD+FL|T_1#x<*j1m|2tR_{;D`%{KC$3B|3!& z``!ls8wVdya6SG4Nt1941mT7?$fcVeWc0#Wnge~n9clpBt!cG8KesX&8+c~(P*Kwd z@Uwzth z!myfU0J>W`{<$QLxcQt6ixuz>cW>ET1{A8m065f^3wRX(2S}mM+a@CWz>k*|=ISrl zn22k>9!V!ALFqw0K%SC;tbKFt2vF#Y);=)iR$%pKj)jS3iY0sj9^h@8K<*SN*Mdos z-wdY}?mMQE4=o&q{Vnfo4{!ft z7aag;a{DeFyA9FYX26BgT0P`>SV%{ALNuVQT(Fj9TfQ6t>%?*!{DgRqRn8ZfhpeZRnhe`E?p3!G7B>2~M1dn6^^kG2S 
zz;9arf(!fWvri8Os8*iN8?2)PgrDvjY5{KcD`U1Ax*5r*nNx;OY?A@~L>-Q=e$gLy zRB&gr5)@QcUp1UBwCRpdPdhzEqNm!=fCGc-n>+1E3(lQ44+&!ZXkf+>_d{T;WugjZKpfj#A*C&qIC(HhE8%F zsX~NXcSM%3E{sV1)X1N?}k1WO0is(y0Wq_c5Fp+ccDepEni;&B#|mUV5&NP;(xj@s#*j?T-%Lq)MelC2$ybhtqy_M`A z&6P$NAMRgBE6~6RPrqBLF9J$NV#AojLnWe6fO4F0l1#L~k)gsY5(yLAor>Q>mbbgt zeS0I@Ic<2cG>)}OnoC%;5K&)PEz6xtt!Ml67H97q^TYh7qk)8{A$N5hFy!BJeu|xo zA?9VoFG5_06J6k-P<#5tnBpMpV9;1|z(pE(f+2h^xR2W}3*2iGTJSqFnP6Xl$u!cHi$|Uk zkWv$PA92hURDz!XZ^@=bN+-6qfKg0|_3e8;12MMWjTUs6l}aB#f5XYly)XUV?q181 z2iSKY^jjEEe=m6~v&b{AB>L1>C!*9Mhw>nNMpokET+V3xX*l13F3t>45xi@zLb)iGF^pUJI#K+@xuy>$M!e_`vXeIj6x8M+6nR^0oj+GutTc|=pYoBX(i1^61P)e#Bkdg8bne3|7U>k1eAu8ntj-=QsngtOtJV2JUsocoCR0x#kq3_>@j40HbDN*U3zRUt|%{hLt3+5GF&Hoiew zWKx&@D{bg7AIW3X(!{6EIe@V(Z+BAMA?5C#+Kb74%I#O}khxMyTqlOuwc_X3WAO+n0BtTti8UxicAPgMyFt_Oje}=2fAFG5e53%GJmtE^JdA1~{asJcQvPtdmXn z89n+!!JF-=aYuU=Kg^|3UJXK98*bEA8p$B87Up4rd<>7jyspcTRS(uNmlb{m;e>u9 z>CE&&!(ryKOE=5{u>*#|^pJaxs|m%zEVP5);EjYK?9q?zKSdM3H9wL_)ysh}rT?^1V%R7(Onw=+Rn z?IZE6^zVuW#XADXPny9`+gEb{+$H>g+F%q4Y9)zF#i8z0)&ttq_>Ti`tmE(boa#?o z|F8nMG%Ijc|Li+j|9frNMPMA>jlsgZvMp`<(fVI0R)qxyHZgX$xHIc&Yr9`3vt5_a z&H2$L3XNtj+bttj?9sT8iwjw%H%u}@f;+7XmH)I{HB>60geRwxCs&h zZ((7?&^NDv7E+c+y*vdJz-LS8@Vf-if#2DxY*|)%pVV%zKdy1f-&e%dA%n8Sp1%BU z-Vs{{U=8#dnS7z=8^`lMH1+_pCJ*>_d_4zcta-ZEzTR8v%=-XP!(qY7>T2JK5~tR@ z1;~=7SR~bSbAn%94e^Y<6QixO73l!;xDk~PpibWX7S)$!)X`K9Z>BNat3JM+1Pb-gWMOdPBnh zf3IwonV$U|pe3>sYio?jyJX!x4Z^kCuEZoPl10ufoBfz1?xqN%juC%;#%!n8XS$#N z`?8`GF-~T8{7BCj?l(yjJtz+S=Td-_QF1*k<5IA?HOUgU{V}qFVK{mcP+XPR@N1d* z(LrsB7h7_rDVvITnVCLkhp_}I`jVLq3U`gazt)JIbKvqPse_hX_Ytm5V%ViJdsuY> z+U+R*u*mtwv%7)%f-uF!;eKP!GSKYZ!l9&?Z|5W*^C)Uo1k{I@V82S3^Dfc}VHBwU z2z?kv&B+Cf5~vn%2&vk8VYN-)>$U>vXm89@dAK-bxn=v zQz>U57|AN_!)#mpYhO7|KBND{>=81=4U>p3IY8utWv)9rsP1IEast>WFn>m|?pzPD zN_0cyH%FO+bcyVkch2VTv!73lu;kmG`2F14@N3%PU1^6ZUxq`&+a?}ma4n7)4VJ;5 z?$|snU~c$)?H^Pi^zqY%8TqC+Dfrvn@vsp1xcH9|Ljko9qH5BGJeTQi`(i&ozS607 zvWQCytf8g(K6`H6D?blFZ=gTCob+%L7yOUclT3!c{fSwd!MqrT2xRErKLlMULW1Wd 
zlvfAfG1>@KC9%!jGZ7t?Yy}-sT~OL@)Q%rppy=V|w~*G4%7BNCnyMmN06(8hkpV{( zx!4-XfUok_%o|y60203sg;E6nA6IW35M{IffdW!Xmms}#gP^2zE}a6>3J3_2BHbuU zFCpCx($Xyribx28bf#8{2kOMH&~u$FA=k4pph1lXf`}YvoD$^uovV+@n+fxcQ)Z;z=m1SG44znODcE zr}dXhKzRc{8b^!dG|TQZX7<>-Hd;za%4AMBo%#;MUUi@6Yaw zN$a^H&iI~)qX-76)Gs^CL&i>Vx++6zY;kBs@*naS1)G576jj0MZN@?6PEcWeKG-6~ zmha-{Kr6Hw%oyci1-L$@e!wnn1DrMhuL^lsY5v2fMEG|R6q&R(&G?n-mZi)n0Mh7R z2*(H+nN@#pwmC5??5*N%l%l&{lB(bygFp{mP!!xzop;7)2|3wS;SuAXgKW?|jQ=%= z6aHl@i%G&;D;+$VeA+P7IOWr_i6DB1CgG30tFwxR15Bam;Kp z8%6K$QlMo|%XbZw|H{GQzWzH2y&E=95#Oh3^|}d#kwF}s>Llc{;F|OrWfO|U-m?~? zn>ZggB0))I;iemFoy zT2wxN>rW%$rEbODmdDjh8^cC)n|1y@t#w76=UQU4^LhPuUtQ72V1*`Tp^Z-D6OdQ) zlGk|V<2^ieeuq}eowqznd0XEh>2 z##BX+G|B`2-NsZ1%!tHQ%+}rK3ld%KE+b{5NIMk0E#D1bKmju}s)t8;#D&=>eBR z$yw~zVZnVj;~?k{@}qwOC+4_eZ|U9;v~={pdm%4sB~KC$ZhG`Znd+ z>%T&Qr^h8G1%X?~eaCwvED`txKWx5jL??3uk(egYrcFxd%z{6O&NiyC9yVqfoS!Y@LO`j=;HUPCtqiHAMO|L1~q!>P-h=&9>U*bTSR8*7kQggCqIb_ zK7+3=%mf8L6=^F1D{Os+dUb9pq67TmHsGq{{DrVv0boI_U?;w>f(qw55fJPqdyGO8 zhNSgk!38aZjDfO`1}kn1*X!V1`FQ8RD~uqfJMuzrWtTYlI@1RcGlzK^wN3dcL)@1P zrqmZf9hxDE=^m}n4L2RxPNZehY_l`RA56;O>^V}D+;&sVYhqeMT~qhF!}B{9mk=kC zi#mMRN_a80n#sLL%yb8Zi+f|9?=j4OlMu|jLO!GiXxK5gish7s|j$dTfU zDumK&eU`mbs-cYeLE|IuB`Ee(Af!6Ig0;%@-J1~>>U@&t@&M~+ zV09u_mj#5^A1ms^cN%wtNxZq&+^?{`P(fdiTRGt1bwKk`2j0$nd!|wXC)@L>!?hvX zI&)$cwRpZ`ji-+!d`08-UrKzKq5t=IOQi=weH*bD?gAiqu@_OAq$d5F2^b(2S|~eL z>qKT>k<@fxTCec#mtylvzO7dF@R~A>0MvspbNLLePf|WGvV5j#DV4xNjbV9Hb6f_r zhhVjl(7oA8OGtXYPRyhvi%%npDSvA~4@yFM98r3GvlIFS))xEGV7M=YvmIZFkL^VzBFGO1PNU-1*cTD8vhnV?lp7vT?39;vq}!C#Ad$qB?+apqOJ(IWUb;}j?NJX_^ji`f?FD84ph zovqyVr9|HozEz-ied5lO2N$?ks*aU5Z|FOIK~ra6$+G(Q6aZpHEeXk+R~`PyqhCG^ zT@dP@eN&yTTp|t7$9O?aKu1F$9JS%tk}$Fq7?)^qV%15p;w*b%g7_6MOlZA2krDsI zTo}YM^WHdg32n{3PhJxt_!GDiMHNG@K%7^{*xX|)iv5Z@*lm0Rxlb)!T;6s2SHYN- zT5P?Wieg5tY7K41rnK%*o04s!e&Tf`sm!sSnf7N_EOuQc&B_mtLE()-y*>5C`T0*k z6X0@@q2xKGDUK{YQuVmj<;eC=>!GM6-U!QO=g3st=kA@_+L(3>-~d`rZz5 zyg9ycq8JsUrY2mzw(WbsTMpOBm6m?=Sw`XPgwW}8LoR57Wqh*afx9cU$ 
z9GPOM#+=FNzokrGZw<-Rn*1u2n^u1voA?S6LTrikxs6(DjJxXfDXkZC->(*3txMhS z0es)P`pFYF7Y^2)X+-!>7!dR7!Bp3=p;M(?0nqhGvkyf#Z{9}rZ~rvWjVWw(y;+H; zi`1*Eqf+cNPro_rd)wrB#DBpQsYcyR!4-d{oN*yV%rkn~PCMv|V3g7+(GV5qC-qo6 zFtKg$?Tw{;H6$A6@=1F9`QD#y=eJ!dtHBDsUMr0bI;tyW4n(33t*TH%wumAd)&$UQ z8Vn-eclt{jxq|@l)YnU=^Kp0=TaS!BvFVS0W^XGxqzflI8X1b` zijO@=QOX6U&An*H#xs9teRYPgnnFw!KPt!_*F!liNgBMVB_k-SKzEl02v^Ue?*t9P zHb9#t&yB5%eyDhU4cHc+tY%kN=gp0SDf@9XOF-Pg^6caSA9I{w|53A=hSZX+1D z_2htS-53bp$Hg6Hh2X!iN5n!gpA?+LqBbuKu@xe|R%o&NBeghP;r`n7R>|PFjl>u8 zK8!l0@>H5nP?Nk7DM+xy!#=z`mh(%g-WT~*oCbeS?Jm6WsuaX^Cev0sCIYcooj^#(tO_+92puHc?XfJ%I?V@qWh^%GbUf!;pSl&w1 zed$L})+%A#cwhi1hV4(tXY#XAaqSwI0SxGA6t}|XvgNBU#YQzZI}gB&Wm`8x7R8ci z<5*vF_=Pm(Vx{K{xR{eUu>sK-qWxlxlKgqZBsA@La0w~BcGO9L zl6y{8LQ9voQG+AhWA;ZUzu67T%M}5nmvvayxba&1<|&hERFSI?rhY7!g7I43ch&SS z!7*&#kNdO=RM>K(s9z>2D6qW_uyqPvdOT_am?irD{Rr7M+sTNgM|cBK8vETDKL8`l zbvgi}J(%ACd?8&4h8o@N?XAFa?w%>AT(s_sqL$hkso=NWJ6a!B2Pv)CBU-^-4Xte4 zBact>4@}9t;xk?WAiba+RQuct25ZOM>vOergYDD8{1uKbfIhdAe;u#wct5uV$TjiP zacKnp85~aOkQAR^_shJw0%#=(pd+u?mp+3^m-i1(HGjPxDE8jXqqEloHa{7P*tC7< zqElC-#NIi>BBwy{o{dQLGxw-D_|n{IE@EW(d3T5i{x|E|=7v7Wl~?yAGCt9SPn0_K z-n#0`&y}YqKGT0!{>G{Zx+S>l$vCi>7mC7I!u+FMo?~JzyPb3`MsxDm<%%Bjbw7{bxE)dCA zZoVGImb)ksrh~?`y<6yjnk?Mp=E*neCI<+2($oUnAB@}bV!sS%nJXRz_CH+KeO)F- zFdD>S8cTRw7R-R8A}2FUm7pjhIOBC5T{n1l59n!Fa7pi7G)6MasPEXm-g=42- z^AEElU{}+m`}};v(~VLjS9_FusZ5L60*}qDk#yl54iiUtI_%brm}^fGO|-`TvJ|B_ zPHfI{cPK6gYEW9Uj-yK&{E_aY>``hj&%*mzuvgq6ZZrHWHn@T#o}=>AY&}>>cG;e# z$P?VgW&Y@VO%S0%6~Psi>HFZ)8TIfYFBi@3yyjbt=aElfU~rhX-}SNBkf}%bG`32W zxQbk-xi`y?wybv2Rk}TW`4IbX@a>xgL8XCcP0FGt7Zgm2lrs04*y5_nk>>z9 zA#X%Hu~O^$E#+ekO3UvF_=x}GNdW=B89x6?XrR`5Byh2sFaWShiu5#B(i%pAd9{=? 
zcSHZx0(?$kvT=@3l-n*jn4-5VFP{{*b*d({vD*?M$ZZ&Q3;ysl_Qf9zg+7Kt<7#6{_UUfYs%{0X#QmL)vuyh5%#FRFtT-PMyF z4SS0*&pGtU`!UbK-~MrK?E3aE^YR|X|Dq$pXPf%V$@kI#(7RCu&$8h)8nJl z_>!+gU&7xEC-hgDVItTzGWd zQl`J?-=x&W@t5uZmFTk2m#=@M*p9WHa>a-ov7B&2Tq5Ht*_g48E+oeXjhNZ1nK#}7Cr zXY=<4nX|-qW&Co9C`I8aK&PIl!}Su}iuNfnrk)E)L0=)`S(O>q7Xa|!_c*yx^i>6j zL!5d?O?K~s_?MCx!(=6D1D#dl)t1U12jo!~8Z?SD=(c7WpUgJ94Hu_vy`{n;vjYE~ zOdk6Bd(ttW(v();f@S6gYH3yf4s~tLr8xn3sT4lEJWjNaQ1YKqcc-+FC4k(4q|y*K zy#}9uzLODFfM%||qA-j5tW+JAENAKw!_Vx)cAfI~M3;33WYwc{CX=7_qDyqk*jG1Z zV}kdLTqVTFP?>O0y`|}L=U&3lmF(%&YngBsH$pIVR&2qJ8B5myS`Xk2>9xn`Z!gwk z;=7_)$G>5(MJbD+N#PW~({Jp>*xWqAXZ}NdXO0g1NKlMA`o-|@mfc)glBQO$Lwe9F zQ8}*zq@Jibx}_G}d8bQ0lEIZCx45K>cEG-OG_rE5B)NTB zh(`(<>+XxbQv@N?y9O?d5L&xPEHos)7%a0-`nej~N!*xc^MbQ++ob69G;G+Q(Ioby z(+NelU_?rZb?rM5cUbUb)B}qR|`CyU{61wX=J{=7i_)VuXm3?LT+Q`pyfGj{@nbUVzY1PE2cGe5+;LZdb_Ew4#&2n{w_?{JdfRklX=}g~4ai1e@==JbY>c1nR!$-Sl_Y zZMo{JR6G=kpZI=njGOPR6U9sdi)~NIisx zIKrz^E5MD8{l+NazeNAzXe8JM`Mh&-(lmOx2wn^o`m%$P?}rWM@q1@PX5z;vx~xH2F^0%>=c7iW99 z4nj@o7u2cUl?YJ}PE{4J`rXhTi)sUD8D0L>J>QCXmifWxWL$1ikpoiRKSJ0#syz@` zFeU+IG6D{0*>`Vc6-hefhZ>~Y-?k`B7rAMtJ3fU(W4@N<>*6-{k~?XpDs+&WJM*P( z>vu3@=!rZwj@j>Ln?G}zeT3aV!WzQ>v+=M5%LUZ|JBGNeHW>_$?(M`EYjMmG^4Ptv zAmLg!>hf z{!n-$6^{8%$hQL-_5EtEZONa_qygBuAD(!=z09yyA2WT3Pc6?N#`Hlu5u^CZ4mMV- zZy z4OFOfLl1Q7Sw;JwL;%$CL4KFLm?f7MuPh$Q8bT$)P9c|!;YT`}Oj(-fzy>KmTZb|nx=lnbYjBKo=~;T;>h*1uRj#E(l~<5a)QXFx`H zSbUQc4*z~h+0C0c}$|U)lqrK zv4)U$kUP z_`BMaWG1<$ zw+#$e@do{e4}#n}$rm$a)X@^0R;)^8k!hPe&LX?9V^2K3cwv{C!z{xvqUQ;zULyvj z)jM-D9U1*)0%~i}>wIrjKKWf7GWA4Jr|uhnYyTA`Iesc4*$HmC2Hrmy#()6qZTSM2 z$$~L_t&)T5$;I3L&k!Ci2{4+T8)uSxiD2CR`_eejfE`m&QPHBls8jJCsXy_agOwqW zWIP8~UCWNLLVZB*T7yf-tNH!7-)@*1eorl_AbV^aouPfnfCygOFD%&tGM$**-PhT=|8)(J59}CUNLbagdQm7g2hgw94DZbu<@*ypa*kI&t&Xq5e z@5I#VQ6ZR$4)KbtFi%7uHO1Ay_3WVitPh7vU(f>DSN!~vog8sFCFv&0ys8Okx>%`# zNY~ACw1;1(?%`Lp{c!q}M?W6CrC= zeyUffUune*P*J{4*9Bx!edgMwQ@Ckp)Z46t&NGDBVy}R~5A`cHp8C=EKhOhB=K$a$}lr`0Z7C0YhO% 
zYFdSZGBz5@L9(DyXBXsUJS-FmXTj`elZkUm`yq4BLl;zttKoM{-DhVK*~us&l=Y_H z;?drIaKT)2x|v6|SUp|62wYLS^O`qLVb5D~nD)=7NB3|nIt9x9|o93l8c4mbPOU9VXVg$0xk26zd%ea$d`bD1bT7&j)yh_aQI{;&U z^FGS91YW~@AQ2jvx0PkD`XGklc-`i>hyrS?JIdpc9IZ2-+7JPaW+W)Z_!0B}BTD>X z?mO7#61l*dXfHDOlalllbpeL2AOjn{&%Xe>@dTI*<<;Khb;Hs{R=m<*&iZ-%2^ypx z?V^Ww#qvTNcsjk)y9gwYO_KR6F)fJhg%A4jX23kfH+=%6y4hfHV)zbp{TZAl6MrM1 zrDiDa;k23Qk&*TQIq%?igA)U&l$4Y|qbI`!cZPkR$OrL;1lhRFAFq3BfcpV;SXn}% zoj|UWR@7aiWE4t7KqM9O2La(`v-Rs^u<%v<-bz2@93-qyG?Mr!9j-wYFP%UQUM zh+HudirGjydbp`~cYo@QnBN3_l%rq>hBPitbtg6aj1=vY@iX851C>Nz=8qeXQWj(i(5|yEBo`Tr&A^@4x`w7x@Li z^^I;)6}K;3@35g;YOzMkT>JnZ(0|_IP<(4}rNPYbLX>qp$an$(>DJSbyA5*^1Q&j@roa#>GC>W1-p!vFG@78ksTb3LU=9Tmj3(D3E6ao@g4j*R=~}0B2Z})AC0pPL;MMCea?t~V%yE4-_%bC`Bo z5VGiezw?`L$0hH^$nr;8@{crOhN~il%0$PL<`glSPyL53$n@XtqtSB8G*hCuvRV?Txc`RDsjI2#m10lSpKkeyB~q zL?x%NyC06E*~rW!lX4ko0P~DAr@|fZauelwR+WPr>b+Ah9$((VpGD~NV+ie1bi&y4 z{Xe~4^nABiwL~!saC9pdE>wbmba5X~Py$lU&o{Zj<$SVuOBO`{n-5a;HN;KjKzQ`D zvQRbcO9cM2O#X8#mj;0$?mPH&Vg>-alrhN;`Gs_Xw1c5rto5OiXJu<*J9_A;&tyQy zOK;O^B-~;h`u@s{HeZwRx6|dR5avI^=;u#L zn3-%BIH9qiobG#c9GsuvuCq>Z5q8#}iYiUs9ZC=V#cv?3bw@FoHQeySzylG=8f1m= zZ@@E2D4=4hku9L-8c~QKjDi%TG1g34o91#oD%X_FPr!U``S3;YWf4yAb!l;(Hu3ienP%AH^nA9gRJDWPfU^ZaUsQL*Tq z%N2qtNN|4X!b*1URK3nMZs~jA)BU8a59noC=51+(LWZgG?3|1YE4J?H=Y2UK=S{9M zcWXi|lc~>Y?qqIc;`vRDLS#Dy?Pv5$2J!$iB^$FJ7VLMl7I|&#BzOfn4{d;anol8v zWySn8(cFpN{!_Y>+oe^EA2px@F7zR3txKncnv%%3Q1kuylrI0>mNx}|a1 z5$d;j3j-~-648D`Vyd`LlghS51sxU*KJ((P9HQcX5+9Ju34}r)+%|p&0A=ZY7de0y zuA~~)A?indS5duh|0uUpi>V4`h!!Sk$_*CRuvwe+EX517MQ4}qHn>Md&9%s=o3v(E z8J;14j%GI``l*W>l4koB90qxR#QotRBr|LH5qm5%+D(P!TbX5Ey^Wb0g{Nk-@2CAv zNX7h)68M;+)Q!W|3n_&8&>Ooocd6T)Lc*|K`RIR&t)^Cfe?1|_F62ui7fluV7jKVk zG-N8V?wj>I^)C}Rv#im~-f-E|%{{-(!z}K2=xdFW>YGV>&4Gb%Ywr{1@ur`NB)-`r zZhDN1pkhQ0+#<38i1p1(UY!@QkI3yaX=MU9|B!AsK=0XrgM^!(o#5r~YrU9^1h~1j z=HXTnKfN%gq^3o-0M=Z;t(+`7pE|8-1rvs9`dD13!KkKR*{RQcOoVJL~t{^a&{AUNx8^T>7GebUixKP-GG~ zUu^75eIcc$9;ZOn^aTpIIKt^m*RXdu2xk6Ovuvir728gGkCJrR8XhzNudbK*}BeNy#1>!;07`6jcF+io~ 
zePPu(<3UTC*|B{RCDNf3Gj8w+aw7bJ;wuoT2bPl6CioNP?ALZT(@#rP2N4L${d~X? zp+~qgZFQ_5_jS4UG@wn?^B5-5Xq`KIK;eS3WRl+X1g0e#u(m^gm|W^>hQa5hrWeKK<~WjAWj=&bTLeWAw>m8v3R z840CNqD|ZQKOQJlf==Bx(=}|$Ng)R_%i=im7jS|u2c+??EO;R7nHR3+K5*$+63v+) zU5Na1e**}nWXSzKIZQ{<1~jn|(UF)SLiFl2cnfksj>>QM+U2lz@3V|S6=M}B(-#(1 z*$-MQg|^X|qfOdmMP?-{HO1x(sT9qp=j4_&0z-_Din5}UY6+Xouf*JPof5=(UKyHS z@^YnWcgXvdg*ZxuoudA>yDPTWmdaQ@@tykeSjoch!6YdWjq8o?4kuF>bsSb+wdseU zgtlinVz!U;pGml``);**V()H`#8xw^5#$L?IHg~-j~>3}FLqAI^74rK+sqNW4{E6B z2u8sO z)0NcB*8Da@<-0rcNjkQx3T5qJ@%#5F`IE zjNPke?uowr%;)|#3Q2DZH(}7WPht`4RGsz*#T8Juy znWX&=X`rn9m(oybY{6uSAP9C48z>*An7JeFW_TJXS>YMNs#=gVzE)TfNm8$002I=E zO+;$S5;w=%W+|A493WVwy+!k5E?kSQsus_RJ30~~#J&`bOf~Qy9XzHoDuIufS%<&m zdQYX@pSVBLmwCXRs#s_uJ1OOA2SShNrXNYaAr_KwW^7@p;G}u>uabv>5Gd>{te4uk zQ7lUID%1dxiT%QouA!+NzD2{6vv3l#{S7}oFE9{s7QX#fd-tzOXOLq>`@Y}P;@4t{Dpb>g33 ziG2M`*k{!FQSIf0>Bgy=Nzwn-uT%ogh zAS~&A#NM6$R7sA{mvMjA&tL?A=}o!ViQoq!Z*m%RZ9^Kz?jw~tTnRR2_X#fYLlt*1 zKBNUJSXhE5xN?RRx4$-tv@aFKo)GvxeHd~UPw2>kiXeKh0wkMmvo`u_VhI4@7_e>v zx+jzT8If0GnvB<= zqOs_%>FselgVjbTj3XI@CnlS>jZYQ_9TOhz2Tk0mKKnp*YfCs2&n+dorSp`Qzdto{ z4l{7*EM(+$-&0ust{ab~k&VqryFzi-;0beX$q1Zl*ZAR^2QNOVCi;X^iczd8Cr9|Q zo5njcDlfL5!db6 zqx9A@4W&(nUCCbAwwhXP;?zs~8c;)kL+Wz0K5%s0ahD(Qvm*5w#|fKZQVyISqxp7) zJo0x<>if`(<|Cs#PSWsEg`M2!GF{yg+(%WddiF~EAs)CTTIoj8KDsNG>O0hN@BQY} z1(4_H?;P}uTt@|Qh`fwKINXcrPJzPCgxy<4@_yV_dAQ)mW|g**z*a-d+1lvh zMulsk$D7$E{`(*sc?)1ILxH=Wot!Yi`at=#u>gF?C(o|4SEJ4QV&`x7y;ub_gHb^o z1OxvsONoF4Ab_xkL86@i$bIc@Ckxa*=&`{eoX+k87p5M^@zr>l+__DJvu_6suB%gY zsl=HS&q&0btjj>pb0}~L>@9ovD7LNwUTf&E_V`=CGJSM&X1_9R^sR_b0L^844nyl7 ztrB-+b`NMZc4Vpo)JqxLSzL;fXWD&@Ud;0%g_OTUe%|t%WURtdY?P@!^*eHV{FS5MWnSHPqV%#F4#Hl^Ejh9U z4_XbBt0~qAJ^Z#eT~eyeC7ypziwY}HCH!qtmX^%=vNY-Nrd)YaF6z=>j5uDE5z|@> zg1_mPhXiN9^n?Ana_`B)lwSR%favTObWom;tU0?~W?|>IB&3U_wZW^#>O)$w!uZJb z;Y~+3#VXTchIzM2)AdH^J8>A#8oBLOnPY~v9wX1{!IiYTp+V}Kp~zALgNfaC+^5y5ox&0m6Y{oCL+LV4ONpRq3 ztvK#h>OHH2*tfw@jm2Y0YNv3~8)ehH-BHg@{A&)(7Frj$VXYt$qUONxyK6XuH4&re 
zsTSA0k@#6(^}0$&^Es;X896csW+^ZV+aIS3yY%MOKvt#+pMPNWxK~L#;-X&i3#&k4 zWed3(q}4i?#gVksi?s{!z~%(xVitJfZj6iB%z=>*kSrtOzMr)yhjK`RFC*=yd_G74s|V^`*TSJR9v3d=T$8xVmc{AKI{**_cTd;B%AQHH?>HL zKbIM%R?D@j#n7V}H@#Dw$d0nP%8uHO-qbE(R8EMvPYwA^CKgjVw@fZup7E|XZ<$Jq zt4KoOQir1?;(dViAI(0smaTyQY5{(^7AfcZjqtEPNAHUvLc1B6{M?T2o*MgD+x4Z< zv?nf)1;#_!t2f{2%0?%gtOq=frNZMxLdG9sS-G&;lUzjKZc`(xPf~ zYyz99-_MUqFM=XR7!_k-g-Ufl6_DDkpfgT)r8tMc84o-t^t*Poz}+$m5~v`XS@+o$ zV*jt~3c%%w-j1z-mvjbZp_f5>J?R6i_Ka6u3DMyuSHVSkJ3T#@!68_*GoEQ# zBcb(x^~>`p@%8>>lIMsXPa>N8U=(eSv6D;&!%O>eF(JDSr`+tML~>X5@H>53=-MLA zNQs91A^~CIxWR?M_GC?s4Q{in?#E~ziR>Tk*hI>)oThU{hInroBCvkx4Injce3@| zpC9pXcYAxMKWJ>qBktSBMWX-#^D;T4yTzE6X@h~Sd)h(j+FaB5=`#Zi@-mmuiDdL;o%x4b2D={0 zY6ABb-q?*&6OzkzZg|_qL>Z-VgQJv)IP14eiT3M^i;MNl-Pqi|J!Bkq9l-xA!OnkX zD%t;=ZPy_K^xHJR;$ovtHh3? zU!>E+XM0cZ)z!r+dx_qy$<)XpPKJNl&B!jQUl)%EL(anQV{jYuA7r|8U09zV$rVW% z_IeE8&cz4L_WT;mwKZnBJBTzFRP6acYz9JL;WBQ0R5-UDmhErtvJyO*%1>mHYJNM( zP*^+_E3slw!Ked~JnkA<7%8Gf$zS3Tb>}?zIXJRT7JRj=SF!1R$HVpv9pZ7#E6wbu zUQg=mBNHWFsDT1RZL?fDxB}^ovUl$sq~;@%8F-V~dI=2#T(@*m1RZEj{?4X?WKd#* zAtB>d*n>crJM&_?{Tr$=x|#TnE>vevM9x31r85~bu*6XVhf=)x1kORqnPS*uM+ruE zS&++2_c*IZGmM;t?}Vxk1p9sGw1*Qj!Xy!WqYCl1kk@hrYdsAX$&+)+JrqXd80vk(!wC-_axq+ zSG^JU2($C*OCg{6Y@vnqbp=Gkq(UGYk?%k(lh)q}-7Y_S*-pTNb2uAi{CE7WjF1F> zoB!n%uxD=qm5fz`-93V=^HV92n~R#TtBYIAn?7`Bf}YOHv1JFW{Mjo?hX9790z(tzTv8$G??AjP%c?mN1gm z+2j81=-YCfhf3+9{fM#?W5eH6F|%ePci;}-@=hDpDT6YfM&0U>_z^SYV;Q}JNH48& z5!uu4iIE?5xh;vwGyP_al(wi1V3dbVpe^Hd-49h()3Cfl zKDkaF;uHUk3K1`1>ULK__yf#QEGTVjLa@4+S*!{^Qm1ipSkhgtt8ub?_Tl$@E#sUd zM+BS^<%vLMD`T%|tdfceOd_65rL~8l#%`e_V04jcUJB3|xMi=?%I5&6t4K6ww4b;2 zSfTUT>^S*00jYq!Mv}kaM>61kHzEuGw$;%DF2mBn;iYDzk$v{{e2%D3SKl$+iBKLG zQs-`%5N?EozK{{!xyONl(Zm8I>+;;(oXx-g1qn6jKPrK1HM2V{g-f2P zTa-nJL|2E6>*h5Q%X~Hd2Y%~#OXwfLHBJDmyQFpz&_%!$Dgos~zEPv|R0MMQhoKCy z)O}MbHkto(hRcI6Fu-VR*}tf^fCv%^ujFF3Q-Ej%MG*9y1LCH%Kng`yK107k#&zKV z$uk2Tk-7SG#z58NFqKS$&#%nXPU>mJV;%N1>Dh&R2m+5j!XF%Ojg{$Q5`@(`*@yfZwrNj?|8<*O^&z5t+z&8Ey;Z 
z{JLxOHkPfTjl+82`NG$BQZMyOAWf?`)1)%dp8h)-#6H_{PyWH5Q;kTfWEGL*;NalC z%j@Oy7vzwNgbc6+{{Vp?rf4x;v<7vFLHA|6D`wgheRaai#SELn5AY!i8k#0HJdOs&2*(@ThdaPY6oAXN%EQ5s3` zCkR-Jn=6&a8Ers_I|-88Fqp6NMfiWFd)zR@8%D*1a*|BsOwHHH8HSPPagB?{4 z>=BqN06BJY45bc#fb2P(DUqm#15^}sx#)dHquMKinN{0a5p&cpYHJkv{v-euiFV8~ zg~gHs*U&dZhD?FOH`@&TyzDFAemu4)iDxz)tD})4E^#a<1=nsZPj9>0>T%uvWJ5lf z*Zi5EBFmL4!lPlAS5}WDikd^im&&zN#4{!3_8LRA_<2Ce>M*%}S^g=(jj{g9Rfcf| z)-N*Q?=PXfM-ds+oJ2HQYw&ZPL|(3Y92h)s>CVqe6g;=-zR%vv6Lp7vkn!g;tmK=V zJCtE5&W9#`wi;$VDo}_pn^KgnEpxO^k-+%6*;VwC3z{PCvV^K*2-^BcTwGN}*Y?Du zAFz|;X5?d|eP#zjDQh~GXjZR>Ls5g-_8QKIInbE>!6}RMexPM?`$T;E!^N-ZX=S)k z_>Ei&(gNDl?c#H#5oSV&pC;@N&zxUG-McrO5*XQ*AUiLI`q_JeQ&wObfB4VA0}q~W zaLWC4_YqjrXUCmF{?SZA1JlNN+OJ)f~i;eymswyr)6MF>!3%RBq{Z1;q^E z&-Rz2CA7e#WjSZ407{Tv8LhJ8JFPdM{1>`};t)t~6@Y1p0P|japKPZYv)_Ju`T4uB zZ3SqeOaROyl;Ow!cPH=<=Fu%py5`yg@O3}OaV14o6vlc-Fobv%AkkfKk3suMwpO5! zsM{T3{y`*%%Lj!}l9(T}Ub4b+Pya#*18{8*zP(9)K@yBH4 zNi_I<#^S+|rO7!_&OFqN&c=8B0^4A;LEbEwZxC<3 zCj6trj2xTnJa)lp9J@*~QBR>g6ZWHC8p3asvhCRU2XPnUgdUMayG5Of3-dH(ti_04 zz1PL93iO^5Hwyz=;;jRMQ=4UQ`MVbc<(o@=xlicnoAMB>7{#=5sP8| zk<21^KIF;-wY*^Bu2l5f!?ttltn-l`6-99+68frvzQPTk({`glRKS}ssD0(k&W2Yh zwMuTR_p+35CMx+xX`E*hlWO2mkb%?hC{|`451GJT&{4d?tADx8tLQNho&v`0mzP!TP_vomHXPG4Pv>$lhHYXTw~HMIGNbOs5I)$aAhmEEhuFX)9x zLKq#d|BAzWChb0D<}$qE2IroNMCb|rdSZB|6n#vuROkLHetpN^l`oG0KdA-sv_#p_ z3_-YTK8im-##`n^Ty940f08gH_2~(KZNY;fM1tU%X#TTc5&6N*7!c?E3rtN+N(F z2M$-D9?Tj|6%rEa_tA(>vOP99R5{RB>3D~-+&rI0JMT9Jm)nvkpL-?rS{+2eU;TTP-@FFK|ZE zan}^>X8z$tX6`@*UjnyAg?1)#8S09U(btNk+Mrf4-*+NBG3Pd{`(D%gdX7^1`Z#yp z`fNAk*a*{aq&saiTxV`KY&AEk=VlO@uG6QI0Lu+dGILHHxX-AmZd8xg#K$$Rbmz59 z!`30~J`^L$OMTwFO1t^DFU(+pLpJgdsD+TFmn#SJZ+f(jKd`Rlua{$mAgN1VdgTo% zl$@3bUw2zQlKsJZrT$ZKeSL)~>cPjNzemy}ldgD@sE6>`K8qDWR2_Jst8Dx!5n-f# zX*So|J3Y)*6MAWCJanO3x=+djms?GwqwZe8#6fW|A_4@@m zF6pbA2l(n2cYO7l{3UE#&GkG9natsLxIG%nK6yfuj+K?s)XFBy1Ph-I@fXJIR!LtZ zEEKci`roAzbvB|-7Oi*tzdP$SDJ<9tft^3G`k`^3G@^6~e?_ne9JMeoGhv+~UIa(8 
z>Y>uIfHfl^mj$?n8kco75GVT3!_!mgrE#$wB!)ApVL+;e-M26D6zCp`B?D0SR}C*tDbb%F5pf%}CC)^bM) zL^AjeWRqHi@?#3y6%b02W@Spd%T7^ST>t0))=xot(+M~_BhX0yclOU{SICq*5T_X|SXe$i2W0y0fQj4oMVCvez5 z_iB>~R(R{-{`1R!U#dzp6p(xs8)Xf0@LBR+;r@Vjw);J_tG3uvufMSSEP%xnt zyf@0^_yR?FxF>d5Dmdih70`T}g{;Edl`|d^_jV2hk(q%+pVd%{f4TFv*$?(37-&= z39guZ*MFYKOR0a-NkJN{h|lFacozq~D4*);cN=eGSpG6FQ1+WT-HCx31kyzr#vjlA z^Lrr?KaTRJ?=;8t1~7Fm|5EEUgjKVU^;^)ppZ7n*V`OFGBw$e6ooh{fGf}Dt;%94) z_g@Oo$eshTGN!)XtFU&vg8UoPbH&6^w>UiVfR|B$Q>`WDjInS}p*>~^94 zadE_vTngebyR0 zE)Ja{Nq~_S(8+v!%uWnB-us zGG-_SJt1A>^5Cc7B}F~2d;4c_yqdFv5;?-{B?@5#(v}3n=qI^z%=b$dj$ID}$RF&D zZYP>rFHNJQM1NL@rm3b8)!MGctRQD``}gM&kX!(X2-Q!=N?&{_0^du1=n;4}^4af* z(R&c_azGA|G7; z2KYYx_aW)G`--W8j2ocNQqV7qV4cX4dI0L?>05Bqo#gy0K+g4|TaA1 zPtaK);F2jcd+VR;ZDCPL1-RmTbz@UuY6=620T6!ZB!wg$i{O^^dy|LikE z@N+~l(x9qSQW&tg9RbdT72q}81z4XrLyd9JNc7*E#E2{Yi{&CeXcsmmU$l$hk=I&) z+KQcqhNkz@*4RIX?h1Jj`LjCt2n=%3jUFWhaXw!InfR2VL&X}DY)b?_I*$uvje}#6 zLy15o6MXJ^+F+PDdun`lvH9RPz*N@dp}U{B2J;>AS67Z0r2{7v^-p^R>a$L6!wKaE zOCG*@m8hTLG0(9diadje`J6wvf^ScJJ43r;Qw*%W#;@pR)OFA?L^=Iw4j`Y@r96RZ zQFu5J_Rj~zmIOH*T=8ZIGI;$QbYWkuervD_J(y7iuS^=vG?|nXd`-n>-z!jEIq(oG z1;!OT8P>LFib)91;bNUM3Ki>p{C)JFJE+0#vcG9#!9{_frgf<}2JhI-V}kp#y$ApU+@F77iuC z1`nG*vFPZ^Gy(%&ys{!Nm;>)-ce6n>FWzQl`9#Sp9<$Z)_nSOo3tFGWE)0Oc!vT5_ zjaD=sSb)Bqk_-T4+wk{a)sooXT23+;8y=8?SFnKOqtYcoNhMc<)7T?v>*!FjEZ!-w zUwrTld#i#m>Nv!q`9|K8^#rd`m$-#0jKfJ8tf@L->vRF^sCtGNKEVlRCPDDI`^fFS z^X+YWJEXK%>wnlfT@P>E~%BMu!2I79T7J za(&^qXU5+AQvuH6L7vQW+H0)93xOGLHsTZnPZmUF2koEL#Z{E(b~OK z+jXvD?``Zg?|cEWn7b5{5)@`z1rwzIKBUZ4M0JZK4mbOiy|__FAcy7#cj7|3R#dg4 z`$+8HYa|NQwAI)D3WFQN_56o}T=xcjS^#m>$S8%*QCfcblY>g>F*2A)@L zqc2BbTFTE7=1)W^c@di;F~8P_DtNJ?ZfvcPe7KS>;?UCtJ1aSPOmy%&PsY^2DkYJ_ z70=Zn_CVAFm7>Y5X}I+w;3y3)+%3Tz_E+F!C+(u zgi^=if@W^>`G5Z^K}7hPa>z+6q6zL>wl_E^z~<`V`j{qIyl34g(0e_2X1xRZV|Quc zM0eP;q9cr6%DvNSP>Ycu$+Lc5<8OnV&DUeIBJN|AZG|M(IgB{<{-s zE5bK4yuIK66MAgsgoRGwo%a_ZIS+D5=&Nc>Yy9LM<;^%&@%vx8Hw6Ubjo%D&#a;Ww3)( z3c_ZgJj1j 
zmgnYWXzUcKUO}_j1v_Jl@V$k3YC%YmA;P+beQDJx<@SltFHx&l2Qveu(S^=R_=tLg zcF3_FyZB}+ygAnQju4CPzbLKwTSvz&&t)o-NmfSQq>BPb7a4KlLd~tBeYwwP`tbvf z_T)j*89yJB#dyPNCd^U}XII(FtbVHt$d!D!+?GPbj6Fj{%x~yX6R=cO;!wDk>bEiV z*xlA(`XVus5$WS{7H)EH!9>&F`hX=wwG{=rKh|x5Cv8a_J4~e zo&Wo*;7V~?YHmQc^@8zQTT_Ei6Eq!*?Qg`sbp$JUT}jJ?h$}s4H5cG|-++K8AVN859A3X1z&(vs_Q2ypjY-eKBzc7jF9p_ zJ&~55X%3-M;{hK)&FC!gyXZ)2&HEvibVIP^sC5$k@_yPL-2hLfO5&f3lHg1ed=Nz= z$OzQe;qDXjOqCdcX5FF2C|doG->NS(9Dy)_!v1z&zV0Fj?t+umg*cYj?xa4oq?=oV zR6g&8`-Hb`%x9&y>3DDQZEkGDec>6l`W_)~@yOTbp1xV@2udY-<*V$NB9lv#k|ZK3 zY$#opK1BHJo`uOa!9$D8a`hyUg42ey`{oy^=IQ2dc}gDWoHSit+`M5pZ3pjZcEkGz zsXKvFCvza0K1udS@y7UqesBT$qrmcTOtf3D#KfcryhJy*WE&|(?VNpYxcm-n;03wAbAy;1&39slm=AI8gbyBtv}X;PMNzQWD)8 zDu04eXi5WDJ>AkfLj$xL$#~e(At%lnH1Acci@#HUE$+nErL1|&AI-ztQ|V?m^wEjV z+sg~zm*73ZIX93zVjXO9W6kWat98&Fv=E5ulbGElpTSOS>}m<=a4h0NLPEGrF|}}N z4UxDMHWiLMcX@5q4taBZusXK7UmB8+Df|9D~aHIUmLkF9OYs*vUOF(>XRMDNc}-=QN3$*aho=pe z9G#yx^Rnh0?n>*SCSgYLcnTWV|J4En^2(ORJXoU=7^D%L_0c;~7P;J0GMV)4>d|+Z z*6~-82YShr3H}%vGZ&svedUk$erI=$XQ`06o!oXYpA5p037Hs-MhLz=i#&n+Az^g` zxV#3Q;rAZGvpMO$T}bpm@$X>>h7t9hK@s(&(H+QFi)u#WSrFR!%?2=Z-fG>_(~(W| zCprFpb+%H=+e|;TDB)}3!`3a_v0JHvm&s;-lN?SuzX~u{ORIA7n5XO;yr{Bh?nys- zLN#v}RV038S@O0Olba3RcN32^%|tu3WBmfz)RS@Xt5PwG*U&}#qg`7iNdYblb>B=} z%fz5M6MzzEJX$ zi}a|f-~L9!u|R#^4r7fn6`F#9AYBF zBl$nLN17jr!;@p+Z{BKDTm|9S=v93eduM6Cz3<|<)yBFfL z#kO9Th+lf{byroD;L_--{PDBZswv5`CzD?8pOzyr2dJ6{mlTGgcfS!g7YptY_EQg3 zO>vccAq%w~IXv61b^6&|@WOGx#LU|-nx|gd^G_wXwH8}z$veBXU2ej1TBeQ22N$Ear~5bQC(jdYHz5vKOnt-fZSU!$Z4v9Ry^iTr&8yB^-p*Xd z$y%SuT=&Xc7tdTC$vjLgQ9NhJvp$);7@e_<$ST*Rw@KK=P?F+QtxrpZ#H`7LbrhXc z_DY+7emJ=hb^18s21hBBL1ALyNDN1SEA1Ca#)-JA$iB;`0X`1%J&!WTE1f&r=_lp zGho3na5J}ki^K*G>*pT5=G9c9lkvz%IRrsQ=wn?%Twr42at1o`7roz7{sI>mPMq*I zFvn5HMRbbZ(~#rEEsk5TrM*HmSxl$m&qGAA+D!F*^v4Gy2gB}- z9qD6NY?-#1Zuly34e*=i^7io#cZaXZ90bz!HofvsAn|Upe%RGsPNPSS@SnVm{9R$@ zHX?I}BFHw9!conkIi1Vpnh)hWvh4SYY&@A);WDUvdAKSpv0D8^v6aAJBv#G~i~;Dt 
zH`E|E7mM=LRS_{q5}+n@)v3uSpylY3vgA`8tMX7+u7-^lCAYv4T3w7756182zm=`Z9*{t5>*x!hO4%6i3B zyXBw$3Hx$VBjiG51CGQizkDH-G^L(=CiO-@K%fsEMB2nu*}wq6t>PQ?dDBTnGS?=* z#SrjDxDZjW32h?0#IPkp69-uk5vn5n3!*A@fISq|hQ_NM0riA@4*y|^d&XN{v#wUk zA%Z-3nOn>4z@W$$z@yH4gPJ%UMlNkbrB>llJOHSDRKU@CicP8RG>ndp{wVB}YR5$( z_puThAi$hH8L0##PWLvfssc|z8TlOTG0skw_SaxdQal5Q~_Lu zb}7jI!bUAXOZ^PCzS5V1TzU+OyElZiNGsufPdF97+_%27)cUba*SRe9S7U=z0O_~Fm0DIgr3&d8=x z|0XJi?(YpjJePxx%i0K)jZuCmcJ=S0_)!iAJ1~@T&?J{4nD7~b6lZuGuh~JEW^P02 z)o;XO!GP`M0G#==#jEe}Y>U_Jz1-XsQvSGbXn=x4YBC=Kd{swUNAU;|472{)xYujNUj+c?%3y}a?XJd zr;cvixP4&YB^Ijeoen%XGU})S~~XyJ0pqE7x7@kj7MVfC7-m=_I>Vge2R!! z7yy-p!+&qyA15z%^qyV0OQ7ykM%Jgfv~?`&uu>>|0Iutz8Rh#;30binI2h0Uy11(pp2B@2ohho#?_C_Kk_*_lK4G5{6oO*KEGQ^5?{!C#U29s(p9kD&+9pcS@G z2e24*^e`ITee<7&|=h>o}<8A~5+yu)!W+ zK-jpr05eXZGM8p3g<3=(6h64djMD%DCQR|F-FdI1f#<@t-jfr_SD*`1k4`Xr-iiaW zYOf4#-x0tc93cI2W^Ij;XKP05?@dOF;K5!;VjKnx%~`E|tp3(73u65A-t_N`!X*TD zeD%cF4C@#Hm2Zq;GXHh!mR|L6{gYp5({Nm@t^L5#7D2p(VLk$1pL-iRUL6{Bb3`Lm z9+Bjgsl2xYUO#1IYBPSO>QIcqV`J#*tb^T*rdEH=y+~2A6xIHuwYsgl?~ycxdxK#Fsn+O0Wu>R4e7=>lgnZ)Bx!P z@snF1$09zILu${`_}8z+118{Lw6Bx~Rg2Zw_*4KKuo9+xVY!aTqW}xQWMvIv=TdrH zzHN9~Zz*pdf$6d^d(BOKv=@5fRaR5=uYDIRoT0(#V*i1BQi<)T{PbLGiobiWWS2v* zFAaLf;H_~YHJ_mztf(0DkHan%0*{~!7yG?o@cq%o3-`aG{mg`siYgx0Xvn1rP!=S} z{zfqd154EnL${)I8U1T&7w2fiL#fjuQBVk2F#;w;^g!}&844z{{`o@lDTWIf$@lJG z92@?pd%-x51mmJpStrjYzr}_(Y%qBA>V~*rVHn_mZi~O;=tLqP0A-Q~AV?4Fh2&gx zZWmqCl#zoQQv9k8szm{OxCDp@0vqV5Boj_JC^rYBQRc!f3~+1mT?^65LV25X83!d8}O-+SkQU%|921o zjCbFL^ahfLRY)F&V<^W&i_;UhWSt&Tat*x#)pR}kSfWFqEo<%$#HnXrG=$y+6R_at zr@^d2G;!u)+!K)Vp|S@+eHcHt4-E7a3;h#viSN(^1g#>)%i1+_vzR{dFiEgQ7A)ep zm`|^)@Uc#ANQRP#HleVsTz@VIbim1!ZSgUk+3I9(XJ1*T=2N7!FvGCUt4uWMJC#dE zu5ZT|m9M;%&CA@M8m_)lU*)lEO&!B$RMrJjG}Ii?vvif?o#k5qR>ZZ9ot^LLx~dc! 
z2raPt_gi<9qs{mw?l=F%afNVIR8(FoZNe`u;``@b6HrS3(vKX7_lS3HNji7QQQH7l zF9L8X0_sX5G3iX&=s>JgV4<6p`#b9_-_lwlmo6$ZV`3V(50;6^NtWertdISmn<;?G zpVX{m?!xxq!Gla_g6-F0B(a^)CVup)OWrf$)VSU-jU?iZR3J#GU`>)@!8121hi)bd40pSF}{U>tvh4+=?5tn5ip-4Hehh?emTCt_%O z-3dPfnnjtMg7sw=o#c0|_agcCB1H({m)Uh{0X~rqZ;N{Kga7|KuNIu8Vedj+!a50K zo(fj{MJ$=m8?xXW(SaP*N|WKwvd6+Zsyp$*Hfl-2Hcw&G2C8hGA3g>r4)(ffabr}| zpr5l}tA?Duo(ybm&5aMXn(fD~gjlE}wQj>yAvEnYN|~=s^G9!COjAP}084^O1OUHt zC^_2wJvC>EZ@H9L2OL*PjFcii9=D59F)n)$)H{3DZ^@B1DYiWuG_ zl_IA^bmGedo@l`8zzc_~rGw#}EN=mOVpWccO>@GQszJu!_%G(YVf)$+Eh8ogJRt{F z<>H&zKsYE_*0vj+OHVREI|`<)s4C`N z>rDwyB~oU`(V8rCb?0G~HjT4iddMkt_Gj13ypw ztGbrnmVgF8i{{O*HWCwA#ab-}BmX0g{j zCp|m6+sk6Q0Vo(&=ehTBElUhLOHL=)V8t0Ty^efEtA>URG;j{zCVE`YUov zB`>cwd5prg1H2RVP(BjGoXu{F+gV<1hULbe1O(amQf_l3SmCS)s0oDLkdd|F*AoYW z1fr#fjJ&P1E{bZ?`25c7q)15PS3pMoABdv0dSPm|+D^p9R!c2jLXQ5%l|&#PC^HA+ zXssYRz5MQgfaqw8%tX}g;U}Hx!F$i;Kn`FJUD2kb!rK~`Lp4pG5%K|CA{Oyfs(~z} zXBsl*0X;2dv^M+?2UD}0TO9saaX(P3ek>H_vzp~&msx1T(6S4WQDvPeTIPQ;?|!N& zkgu?w+3$F%#~yS2?#C~>we%bIW3TA-ju@}ZUw*du<~2{E8rvZtZej+G94=aFXH`NOS=Xn@eG(#gi-ay;nGu#Z;Wpb-6R?_yR22|`Hx3`?#PTUejQya8^5m5b?c(oT0n@2d0<@URiA0C!;Qjv}9&ET04YRk9^sazIoHepytu z`pak?1zb{mzIrc!s+YF5_UpxG|EG}yW1`0zDomCr>@F>;(K9W zO6}WW^h(B#UX*zAWWHTh5}o+gQBlFX_%vY4i^>5o`54Ij;s zS?$%0PYOU)l!&f8G(EQ@{{+MECOeaKia-&4I$|IoOd*>W5%zU=FnnmcM#+PhGeaJw zz&OX3OlZ7ac|9z1IpoX})y)Ib65Z9Oxc)WB=(Fdm%<_!y&|R!&u8((d9ITL%ZF~ZZ z$#UXt^IQ|7>FzdUfWzlgTHmw|^Fz7e&<}EYgkSHSIm027O*ubKKrxxPU%hjn)jwoL zq!%jd-5p6C)y|S~#=^g8B#-w~_2Nw6sJLIk^1g!NJ=&RiI97zQx zUWnO6IXlHnc9(l6H8w@~*qlvY^1Ss2d1^ZG+4QD05KY|hXhc=>+`HM2=rH=si1E+* zb)VR^x<=JI)finoaG~D&+sMNNc7J*#H)B`UbH`f6=^K6TeS%b@-8bkwzjPO9Djn_K zLhltQe$XpW_)|tH#$pyoF(rxmWvO~S@bH_;HDN2W#i8g?gSX9y^hYEGh?8#B{ z)$BZ#A-_>oubi68sH4~IN=Ln;SxnoW?xrkHDJ9Qonjb_xq~A<;irmtjIZYceM{)Y^ zJiL|f_MxvYS6t+J%|pHZYI3@pt(M(uUWh!ZxjJR}{Z_kM*2IGIp;RU#V?N z5>@{dHlKufArm6S`0o!KdJ7vqo(Zk*^YHL+lL73c{jc7R7$1BnlQ5GvL&H7k_42JI z_kmX`OQr9rFMS;zR8#~_Nn9#5H_DQnJd%q0PAcwa8V+)K1aldc$4DIG#aZFe?U}HWFZUIVGZ)@<3 
z`J83v35Uk{H5#rd5rY5*WsH40FxGx#7W_Wl zvmclto4P&u(7&}^MLq|@?^kv`dgfLCyS5NiJ+H!_LjDqwzzSlgSMlLJkq4_!3Ht3^ z=*8B3HT9`!wT{!It>}#<_+Q>5MasN<)ma=DkSV1RSpEFGfX*U{$ieTl?c{Kw)6`Rd z&&PKY> zM>rN_c+(S8wxmdYqN+m@Fj3nIV8IYB(MFV?VX_iq{J#)jJu6Uyh?7SRjB3TD|3?l8 zPMQg*BdlRmN{Oa&J!=nP!3w$MQzg6X)EI_gOnoYUGT+@)R_g1_e^`FvI2yAWNPpi? z;O3`cv1<48y05{^hKFqxyM@UoH;i)n-|IX(Rmpa;L7MSuf)u1atZu)7d@@b%CDMpX zB^W=6qtv$j5>{wHRm$9gH5oPt1l!_7(Mbu08Xmu!-3X&w%%tCR`{~bS?Dn*i>BXwF z{;@&je92N>T;u+DizaS5@PfMPPT?NWR@F@|&m&pXB+~Vns<*-zzmc5bcOC~+b^P3S`*u~^TP{}lVr);*~nfd-MriawD|^JyQo=m zeuA_Tv_kves zTIO&GpNqR5eJ*`0i03jF)^G$1n_Lb(va(`)uH|aieBO?`0fP19L!;Hdz@!`+g`*-} z?97ri07?_qo{X(h;eSPz=P3~{QBsa6e-^q!o^xtz!7KJM4x{SOOnrbq(T#hVv4b>U zdbrSl7X}rz1+q3)AzV6O0TKg_4}P4|p$i;Ey$GkjWkXz!35No&2KH|sQTI5@89lm^ z*XLkw`+$nOtYQ#uq&=jQhVs$olYRAAzG*ND90j%n+5_#39U(Wjw?&q#KSo5X|3FRA zi4OC=d>HF5c5ryjJMcgrOd!wcnKTEevH7{6r2Z(X`OpjqVq5j3sy$jKqgP_8EQXPY z!09oSCFpx9DDY0l6&Aw%w~pC+yE(P#8?xQE8J+zL9yS*x+(ux?J9YEMVrg0HJ(;{? z?vI{w>0_E>=4psZ(5pq)RhabHj#%PWEe3DcIqGr}?AT8?vlJJD&0WuR~%TvlZ%Ah|_UMU2~#xm?2$}fRS7cuVXNK&xUo1?A*7*RH~;#fK+VkGP@ zB#w)Rbs|Mc;c319^RJEqJN*I=z>3ZA9{n09JGeN6W;+7SNC8a0+gtP6l|omnnn-v| ztIGM4!(GIh;dQ(&k2FHOCl6M|Nv6ZjVI-8WHgaZX<#)V*X%!mKnEogtGL8tU<>627 zqWo;I^W`6eiPRi7dXKa3z6D1sYvz-RK@gwVY)iJ9!C3z|d-c=fSc&^1;y$|_E=jId z(d68%pVr*+zM}~I%kAS9M}3j!1E)hYSZXY_%+y}D2__wHvkNYXodi>*{)`Y=QZ+j5 zo+q6B>7ZiJD&9r;QqcWRAx%onQEJJ^pBJJ{(~Zv*Dq1;NzMMw3Rc-0Q6J^XO`g7;* z2i8mGm-|v-lmw`=h3Y*%Y9+~wfC8=K%myQoUG=PwSy zgx04H5}!Y{63XF_I!OA``uL`^TVk`hiFJh>{?TngbH|&Kj|1C%t#V^dZ>1@7JT6&#+>X62*xva*tI|^7XklIQ{KTgm9(t z_M)4NUq(g}Lv@VW|H~=7Cj#_YJ5|bIcyWPA+J}vGyp(whpd9YKJ8`f1SBRL8`SnzW zhKJ*Urz!r8O^$q@cCizH0@`3uHtK?eba9*gmGjYGEx=+gSQ2uye}F@hq9Hm0y3|6W z54rzoBqlbAw2h^=h$`0J#BILR$acWnGx=5oe0EQ_-Om^Ng$L8B|6nk25_)XwRNSpJ z#s2b4)n9wxjiX&3Gp9o1;y5hCexa38wrfMd#JqIz!svxNrJXO+sHBbAxMe(@;*zLz zO_{oyD!sAkCdMnRhiYm*5e=&AW3{e@a+;BS5=?B%x;0m>%a0l&$NHz~G>tK#B~SO8 zBk2sHUM9Ub-F?0?t&(MM`fmDu>yy!kbs-yv zi{bY_Q+}PZ3)y`now*!2r_OwZNP{lP;19vrXxQla2aaFRRQ?p<3(8M03o}1&vCKqH 
z%RHFluL;hZ3Jd24O! z)i$E(#WhMJue-~i8eWOZ9IqZ6KBC;S__CS0`Q=3W*gs^5$qHO!4Wi#}8 z(5ove*;)9G``CVILIua6V(62zHW;MEMYb8g1qMbhR7D~hh!4mAD4vpJKVI}+b6>UH zSsu3Uiu_^QTjb+N&8mQ7A8gQa;hJISt;g+!m#|{zbBbI>^|n&Dh|?T8^Xx~JQ@zD; zzoVQlw)zTOh+9+t^)=fjp5e!3b-CjM9mZc%qpUT%&@be>OxC+E|KeAB&i15nzV1vI zX)qxfqYkK1t+%n_O+`C$N}tDw7?EH@jT=@F101(I4#6*Q;ze;P|0ll|3T8&6Ta&QC42~(txTZNx0djE9#?f+GL%K50-r42bE!Z9y^AStz;ojcfhyGQv zf5?*GC##x|S zw_2+j(0B3YUFRO)Jb1KqhMt?mRK050rR2ORS=DK;%cG{#*AG&-WnEXQ4rvcox@xXc z*-=OlTdv0O2gw0vZ^KsI{siNt8om%swhd2L7YI7{tA}uK`}XLrx#vfjfsVVd`Z8kK zo{+AtNk0k)iw>ZnKC&e|s8cVJ4^^o#&;INTk+T1(WG99eJ*FAY29 z;m$Mi-2htZQEZZ#Q`33s$^KN_We37$)$E<7GfZzb@INwo@uQ~Gz`nd7Kg7!@gynzsyr&nkFk2F%H zDyjA{lIT0L1iHFN&O|42o()O#?KZm1;!W^DjR{CV*sQ#(j&W+0saoen<+4oP%DC9) z+xl>uFiwii_2Mz@kP4E^75s0`J#wV%dw6W`sYpuN6oq@DhW1l!e%IF6rQNt8zx%ZB z!aI*){7*5p(_y8n+s*Z&=~s&qPXaG|_Yx;t*!n(xpz!tiq@1_5CsTF!GIe#LYT&GK zS9?_Ywu3!gi4{9t$o;v{^rpr=^5qLE)a<-xKJpHf38&tlBfJ;VOC)n9;U2k~GGaFG z{tLwDE+()H*iEeKMId?^mN8;fz5D_R>~2yL#VA>4-)j2tI6@LyMzhe;Z|XiEo)0Y# z*~_FCcVPyWrS)vgqr5i!HkEV;eHYf;242ZVr21w<$PdVB!?sfz!=!2b>sriEg zI#Vjq^;NX-!H(NzY>0ok&Lz>k`?@a!#y7+saQ69reK>IY9Oms(PWso|IYjXiMvh(B zty-W>Y|iOvsW@{lWyq5De2k$J(^BTO)NQ>)f~x-T!7ZXdMlop_N-7p+NRF0a(>ap) zOHZ*9QE}-6IS!QYuM>g%!N$5MqLdEp8gb`y%(r7<{Cg43Uh>S3ds#!#v(B@eW$N;M zAr~W$F_ePUF}_zuo!~j4)p5CDX3uQq9xZ*1E^OE3Pbi{XunAzeA>InFBo5DFj z&&g3Na~&x^7u%eU)WT>OP^DO3e4xM*cBXWhf}atg(Rf%#L=X$8S(2pc8lNZQ$d~{9 z_O=-J8Wuvc@WsEMyhS;=T%oO)Q^?-$jst&rcC_4d6WPuL^8C9d_`52$R}Hzi=eo{k z$qy`V*S8BYRG3!hCkU!Y={C!p&(S`%d+Rgp(^oqX9G@z3=YaLMF5Gc0kIh#%KKM~U zhgWHc=pFkix*L>_=p$~%L-o95O<{iHMETi!izAU+KMcFj0s4=Mq>yiqkkq>#y*7of z>Hn`p-U3galFRTtv*qdv@I2ThS79CoJQqiFDF60z;8Waw8P#5c=Ip&VZt;~&cX2h# z7lP(vQ`5J%f^G`dPZMW@$i3XIlhKIR5^Qd;g7v$$t!`K1a6;>WylDy4;mdz(R+H5F zg8%LvLD=Vb?}XQ~5=^rslBFC0tneP3Hsk*K++WjEe>9j;$j=_%B420+peu&6bIU~I z9j92fpB3s~R{bRQuo6Oh3;JSm-;H9! 
z%UESXu}5J)#^O<V|KUP{m~VJyDy&e{L;=?!~ukg(9MqnG#Mek z-kHBEl#Twk-2um4hv%?s@Yc~xik1VM?D{^T*`$^XGG*B5RVT9BZQtNyq+vl@)EHo= z{(RkrB~m<8HB1p$xA6XvrC)D(X;MK-)n=q}139DoFCvDW7`Z?%-w!(?YvW%9KNOS^ zkn5nmvKM86Ps!0rN&5T4PXHv4Anp6`QG=+O&IbbW_dG~oNw@kkm5A$!mV0uDzxhf* z-lbnHiVGg?U?C#1S}-3#R__nKlJjB*_F4V)bNG=S&#bHz?+dZivp(a9|sRYL?jb=8}KSvEaf+uy!1!2~A7avcVB&3RyK;yw7kf*1%` zo%kIASU4)o<-Ko3fPIbyKcv8-NdGz8t$C^kv#sm{tbMt0F;v{osU~65>nEz(Ak{LJ z>VNPt53aTHqccoBZ;Sj>EBdX-9L7MVWOodQSPz&fMaTMekmtf?i&`Cso#+`Daz0SG zn|{Q>w$0IuK6WnDg^LSuVe}q&6Bo=2Z*VVOh`Ho?PXh-ZWP|DvTyn;Bpi}9E{TW&k zN`xo=HTMNM*lN#!4Gv_PyuGO3BqY@8KrtY~ll|WjI;kKl_Zm?w)Df(V2-D74#EJc5 zk7ZtexB}Qx$Vv#is=W^HpEkKP8G_eKjo-druiu*VX2#Xy`?@h#9rBeJoL*lN(PX0+ z`Bxqqae@YRK_x!BbJd2H9_U-e-4jn+NB;zECyXD+hwA`W_sCKuV&sIZL2 z@8$_oenlJyqK*AC&FAilx9wij?FF>jMR5(Ip#JcFN8Lr^0~0PNuig^Op)-6=prh} zcmqrUcv|6Jbpwyzp8iViGfU7i?P4ZW5GB}4ONt>T<4BVY%mt8Nzv$*$k=Yldgg#AFZ1KaGp(SO5oPFc92fcw)hK8*Ws5_*7;+j z3l?FWFI4a2JzRXl(D|ZZz>DrOTiDrg4v@sfh42UAJYqxA+ia-r!6^P?2q&L1Xd54{ z#@yB&OFU?9ULxn7e#;_f$1NTum+t=LY;HtHQSb$X4$iCkbNiQr$%&4(I%NT%LK)=^ z&_{Y0&V=q7!5O#7MPl)zdogtlZ^p{I7I6~`;u0Jq?XCF3)^}z64|HJ4%eFfL<S z`f%r)P$7$oPqCz7h(C=+Nu%Ptrk!_#59dm5oveZ2=qGR4`f7iiSkMGQ=c9 zWB@RJWMXL_j9-Je%i=D6vXbLGK^CLVlh*6`0V+QUPwp(!S!hJWZX=5-Ae7s zmN&}3*Bu(_Gt+4XhUEUj$f3{Pep&Rdr+d;%Xu15C3{R&0wvLq|Ol19*_`MI0(n+Rj zp!duHK~T{(QW4>BL=4r+*7s^gnT?FW(oCJoDWZZ~!|qhPw$>8w!nI}ff69nCw2ad( z^eG8eaS5BGulall<1V<_$adnk>$R*-Nh6iqt);HeRD+l}R-(V$^>6?6}saPk`?)(=H8vTBMW zoy;&)2Z9NX{s-|wZ-tTIANaBD_rTD>wz65&+l7*D;*ug2cK25E9<7%rpuJN@p#!0D zbpKRl=;kn-Iv^J;G^@Eb3ev)p!*yAXEJYlxKiEZUShWv*iuk&{Vz5+W^Yt{MTj@zY z2vYQ3J|`$}>h^r(4=fNZ41h&a#q^W0>)EV}Yss~6V>2dI<$6qI&&I0AgV;eFArI?=1L2 z-DoF9?Qd+YFI40S%4hQ%w4|P!Dcf2v(&HxNu2Q_Wc6pO=$002`sZV3>^yGa(hr34k zKQlBkQxv`TW>;3P6buh;H+|j)p`UTt2|KKOs@E1N5=GOhzM6Sa60 zM@sFClG&K7eQ$Mjg0K;ZL35@-XyD{QV$yasOXF^t62|)*Y z&|czH+3FY6f0;D9O1bVU+_jy_ZCkYZd3^1Bt)kePfA%Ijg>L;`_reSELD|542c6J& zH-7~`KexevcNAuNe`a%u_-$JJt#dQ)k-f+C%*<7+J*P8|Jt3Iadgvn;QzMOurK)V; 
zLb-X;3gwhPVi73zS5_y<53L2~Djt3*O76YcO%sqw*6CSWt|)%1+T{?23MzG$re7YU zIv&r_tcc8()G(3pmXV#!S@jB2*DpfML7TXSgo8D{Vu|oWlGs`d#gb35B;S2I8+~Kj zr3Bn;L{6&3{x<{$rz7C?TqBwsX{MILk-`8?Ee)0egDaww7m&5CC&^T@H#H6pjtLpu z0@MVns~xl&3iTpBJ-l+$kaSI42GLzJk*zedv(MY#9{+6WdxVyxvov!|j7)ijqE%sn zbXK67TU2-l5P_wqAL(|vUz?inypTy;(!bL>(tbI0>B$4dBG#biEQnfC?$xS(O{oM< ze5bR&ag$S0LKv=BAYzm#v!7PF&68po@n3;tF-<7H!Bm56V)S=GJqigh^T|KsrGh=0 zEJ8qO7+KNMGBV5Ai&9TkP|Uy_v_d3>d^9p*xm(~OLFl;Y=>uY51%mqrvCmVLyfnF- zYF*)~W8)m%A%k4AnL_WH&*UxT3Q4oXM@Ov%l{1EWK1D-wKvmI%&qe=Y*b9 zFQvtA&P8y3a*|z;r<{|`AUadl((>RI%hAC4gJ+JBEw1R6hB3tCCm2PAld7G=-IbZt zf1IURI3#Nr|3JPg*4}y7-P~$kDmMfr9C*AUbrOveaRb7}p8a{t$Hh8y0m;~LhPY`i z)_>gN1nqP<{^F6xPap1uF)Jp=-d*bc3r2zo({9wrZGJSq|D1a%wp%Srp?E!3Y2@fQ zz?Cy{Rwd{xG_nnjKl07<-1SV}aT>~tr17E#StEmvOjX9n=Y~ohc$T{MIvX#dC!8fG zklBdw(kJ$uX7BrlO&r#EXpCik$2~Zay~H@Ic!fcFZA&-r5FX5)-#4A83A42M*6nZN zffoLS(d$^oerl{HOAL~6vK*?wVnsRqw&6%g6GOnSkqm zHG1(_Tf}Zbn#Yg>DZ0GKDOD&MQQ~s5!5`sRzdSbZs)rLcP!xV%+l6G2waalOI+2e- zishUQIX(S}SBWhpE66`zu%s>Me)CIBAUqrqsD`dc8@qw_+uc{H@r~d*Okya}^JTf{ zNm0K+gp`oAm{LrV=8?2%CvOfM0?|j_;N`5A)uu~&&8K^dob~I{f5N7kqSbOD2Y-G= z29xTJ7%H;)<8|BK8*e+&ByX3os6S(Ttvl|4P2{p*{ih3y^It@?`FDMncTx%~YEZO6 zjU5drrPC2k5oZljp8R+NoI?w(1C&%x15F!4tUpesFGw+SSkIJEkx&(fJ>A|_tMV0R zNO1I$x8;8&%(l!FPV*c=7XC!R`>5xnVbNkw#O}BDVtv4fMFw|0E5Fxhm50Or=IrK` zdoThurJ}|JqlhMCz-A=m>V@;s#l&7OZoF~D?QHk@7^W}Yx-K1fA{Et%M+WW@|GEeG zk;^GFB*3=e>{FB5cErpy(B;rF+JKo`4Y1*Ium@-km9$UtQ_IK zs%pR}XzN~>B1xy2ZxB3%-tA?bORZK7c}SnDuJ9R@37^029bL#NjnUQ;ZPnFm<_pf1 zKKxE#?6o9Z=^dE8{bZ#e@G(VSc~I5}p%zX7BoutjI^HmI%HLUXa{t|Eklm_{j(qbg zy{+D|&T?XgSvRWW=8JIR+BA>nCVPy3y$8lcj_5=#z6M^O!6PbJkw81J#VySgcZJC z4>l3`tan}E`k6tRI0MVp$t&e5D`z+?UddtX&4k3By)zhFaBGk>Cd6GpM(q9B+$@VG zGWx+6`&wKd$Tw; zQv!Ve6#n&Yx&r!ygdVWA;q*GS903$VKUK=x+8R-&W}e1#Oa(Y3Uf8-tE&?5- zkAOYPn|OPHN^kWMR)+V&++wL4UNXvzPY8w5x&Cj^x)R6e^C#B#My|46;i&Q(PDvSa zOijcgiTJ^Unc_1*slt*xDuNrhd#DsH_TFiVz-XKMWpGH}kEOU?^Y9B@yV?GTc|!9Z zSmdM~ZO@@jdiM`B#r>|m95ZDceOeM#Qn)l`sKczc-u607sX!7 z@ea&X 
z?N1Jg*7843D;7N?{bWuPbdPTDe$Bm3oOZ(U{FZ&d_3WY;rJrd0FSSs3mZS3`F!6*o zLEBB0&vrs2|&yXHAPd4c%dy|N7NqN23VR`fYdBaIET1q$uB8#gp zB-fx)aI~ZYv{KZm67Kk)4G$Gr9`xp68CqExiK0%fWou@rJ>;^XTCd=~6uV#SbTFai-gXs#ED4U*W#wn#c)DMPvV(gEG9jq$~Z_K$Zk9W6e@(!2K{TRM9m~)S@(l&c< z4f#_{vpg-e7zhQxl&^=6n1qn57+Bietao#}4)-?reymaP+*7&A&zi@~Pc=mkvk6PK zFOEx^Zt29&3`FLK#|Y7JALIG{9Saye#M4K&FVh69Ya~DZ)Bj;IWpeF9C)IB4R&}@s zenyrY`Xj9|(%^#NipAZ}`l4==AuBR3xir=u>ScToy5MgxXTIBW=Mbl;+Ot)`8$iH7DqQT{H*(t(OSKc+l@9aw7 zI_c`6S$T*R>M@hkTCC$v{fkZILoarB15qJk`TdHd{OABLM_vBwSw4flED#93bo8iZ z(&z5dEozgw!@2qo3Jr9u4XlS}tFd+6l)9h!ewIM(sdD?R`5bH;_QDOBh6k*Sksa;qoSc9S*friwXOi^XWJ45d(;@&uGCpS zyE}gcwJ2g*2RV)0mHhHpshqUwsEUI697{EpYR&>ReZ29i1WlfogQ5->rE$t}8ly*B znEBIZL-VIU?xlT=W@I#MHlL|2OIZh z&Bxg!uS0+Cu7|3|pU&jH+AnUWlzv=V+$M^+o+f`H7Z{91%5v&iOEUL%d_(j)r-13Y zYc#jyn$r!%5buUhpt|xh|>K}5aO*2I-D)pv?=vr6_ zsbbYje}4|ZvgMDKN9%3xv;%t}<~gi_Uc&WD`2vl_2}}Di07Lx0#G6c9SutT}5oT&g zMn?A#RPgr?=*+{z!*<6#vW}Q{gc$>V0=P{Fpo7B5x6jBhu7PN<$cc_#yU&Jbv+ZSN zKLHc3xtSx#+2z#!RJY3hA_PD_V@Sj_eUhe#Ld;=KBK&w;Y)yY}B_)0PeE? 
zZ2Q!g6}yBbG$owqJ}b6=F3 zeqh~s4Gwqrv&y;qY}mz2m!j5nyQa!YA^vB*mzlWk#hw3NPtXZnF%J*iJKYc9E~!#s zKgX83)L4k`7DeY={sS~588YL>bCRF#auER<(Cy=sdHMLW=>eA#s`i!g*_vIJ`R(N1 zMXC5~Wg5cYt}8q%9Sc-A5}G}%V&n85v;$L$rnsynHxpvAUF012Zv`rxJ5meSQ0Iw0 zclJz!gYi$-UYyE<6bZX9AYvw+wxMHhi?^OIN}s88x&Ms$xDf3xb;AO)6=prB;a*9& zU3?dDc5K+KLJOt8)VV6J|C>qhJzedR_&~@~pM!W&cjwtAorN!f{-vDsP_`Gfio7KlKh2 z@~M+bV#v?G_mb}rJFQIiIee9nJIs4GnOZ5X&oFTEXC_T-S7NtMt^Vx7-O&B5#k$EW zZMQQL?p)FA5=MSph!e{BReR^T=vm*xJ>j{-vsRC)_aU2S#g?4u-yVga&X^BdV^K!d z1C)cPJ8emN0x@P<3`3OHO3d7Nqh%=)nHa?1GH+Jo$9= zXU5zq5v53A;y!qjw9$^=AD3@;v=juttL5JG;N!kqH^@+p(f1e{ z;xx~?5>dyXsW<)P8b)|4lwIYm`YkioBne{kY~;mA#|_sI1!WJ!Hdqnv-f@`3*e70j zx#%TAdWk<|?*3E5D%S#)Qqsn_>u6Ckvjyq2U$F3_Z`lF>K+sYzJqSp8?#zwG9|Y0f zt(2**#}^pHv<5#D685wG(^Q>p@7Fe)`H3zI%LcZX>46T3|EihlhNRV%zi)VGuSou=vxLZ<6 z`A{G;{0eHqM`PFiMdr2qw?n~9TGeadiFTDl(sf480qwQbaF)*(;lZJkc~J80fXn?x?W(GS&Wpp}WBQruhWa~^i2YK7n*Lnpb09Kvew#?V zCtTCNobql*R)meRBgL0L{pC54ut+ggGq6aJYriZYw@W5lvzuG`S%22P4Fq2I`V4?Fw7? zI>YaOJXrZ%RIkj> z&9UX{0OF%QSMr}>Bo2bP(khl)dv2STzvwIeWh@T-m`~LpNMu(c_sl5#xJvmCxMhgQ z2vL?k+~mr$erVY5N!nTzYty9id*^y$kh{atEiL(wTxGT@8&!wStewt_I~wR3L%ROw zjLd75A?_On71vaAAJ+R+s6Kxic{r2&nm}FUeS94yQYr)S>~5f`@xeGn@qqYCgahIi zhkNM-N+eUCt74awZ3|1q&bT%SwKksADOm&iB|b|=b zpA^dPRQvh)4VRx@0p!!~@<(rq+ek%?Afj-jIk)#0<%|R68V6Tghy>@-(Glp_LbJ3b z6e~{qMJTQOKr(k*mAvQf;Qa=%+@}fJfi1E!mq2A&Q*zpsdaW%SgScb>GF+6NXF!-ZYSHSebh*B*61H6j%uFn^_53?HQwYv zts(0_qchXaV8nP^!=U!f1&cCfcP!zHis=#Wh}l(Z$cadMgiM&OnLG_RUjt?e_EKb1 z*9{R~L1@>0RTo>^RJejpdl{Onc?ac~!{~%lPuZN&UE-VPZ#Ui~O`8Nf=9Ure-3X=y z%7wRZ>xg5EEKY6z28BL9UC{fl)gkh6Ru;=H}NCzMPH}}M0jaDUeM;vcPUf@eA5~$sJ)13Gg`@%_g)*D@- z1(kW9n3SeJpS5qb3BNe~P_m{s1OHCOT{-<}qwnwSxW(MIl5&~y4Jo|QJz@OI$I&7F zcjn0g#Dwpt#wxS_mL?XF>(o6Bi%9n1XxBfXR3o>O|KnE_BPSxtIcU20u)?`zZ0h~c zC+4)}O0#Q9feH$Gf2u!(SN>egH1ZE2>%5Rjxbw*{#Ne@O7Iv3V2mPLS)D0+%VMrd|x zAaH063wgfPDRO5DTyHSmh7aP#bcqEu$uvIqqviX>HG5=_@1b2gFYc}t+<&1Z`~_?o zG+$C(Ifdq%jsck!a!#i&b%imfKyZ5ZpiUI)<1C9e&(gUVQRbTr8k0d)%FL^c+o~>G zSi9p;dOcMS3{Jd~~Q3Ng>WZCEC1g8hWkjT+Xp(>Gx 
zmb{Gg76%0V#pwBWU1CH=kLZ8h0jpE1B#Il4?*V~fO`c1`3{4SGS%<_XVFi%z@z}(I zL@MDtrLJ@HudJEFs1vL9Nm~M3Qfk!`rUgtK_ur6&-%KpMj;iWY%JC`f9i-c9bu+nR zr;klC_TkGpi>eDcHg|)a#6KD&uNI}rGtf-igq@JP-tCHbm%EW-|;_YSkm44 z7UA9w@cNrxV>tTbKK_j6rY#;v_DXplySj0PL@CTkYqqjlFdWT1ekX0%FQ62Ca~*e! z5yu^BaduqSwf9WdZaPX5BM-^yw@hCYT~Iu7-y;QluerO9W#7WDJ5!-qrk!9=dOM&0yB7*ScY1)I+}xp=%wT4bljsGNkd()6??&0f0&vz_@LW;AUES8uyNpkpjP5RVIZYcQ zlICvKM#S*T`Yv7?cb$6u&0Hum>=_3=>F@8t(^wUUGZ)$uMUX!!TC}fP5^yamP}6)c z(q71a4_FeX&+=T1fE)SmgZU%rKi#+zp)(g|nFJkgDpXJ|RdX>jNC+$Un1-NIl*K9FlUQB_V$fFp|caJ(qU=zaK|^fW1Ug;Bk~ zGTTs)5ZT7GtuLi>_FhEMv93y`&*Xm8^{Y3>Q|l~4wpbM+h!uw-NqrDgAqIQf+jkqp zY>c!%X(FJKBIQ z@jP=s3oSKDdN8!$8q=k;EnZW1urr1Xdg6-Hg&Y zH~;;JJtNBGXZyoAjos79SW+)HgHDVq$&dV1hI zsj2$d$*b!g5=%S9)vK4XmCi5Rj4_u@GFTg4xOy>kbDey<=8rAOw+pnMk>IITyz}al zy!Ya;m)r5&b4TlVSqp8IgHaZ)hDvWLR1PAN?#2EETr*>la~|z}O}he(ZoRw3a+1%6T zlZR#bA-t}3vo*Sd9apilP{r(M#r`deUhpkuR&` z?+q4Nr~a`-dV40d|DJvGFzA}6cHiOU7%8SVuXGO#t2eH@&pIENcc_&+F4R=TH|bQw zUb*UY7EYg1?vPTHaO8d~@aIqI*ST$HJOi0Sr>)=lYjg`_<-OBMMEmk}1k#E)XnPkw z(;li_N1aS#*>;4e-ys){_=G#T65yik-zhVc6|f1h z`(feiTLm_f@;{nNxYbM~PQZZ@`7>4SWcgvEpz)QL5_2v>zj9xZpk#uF=4*3YPw)uE zv{N&q$);Z+b09VkK~EQ$TU{rgI48lk6kw$S5zn#E^gLel*!@BFK;AW}EgjtD+v_*qJ*%-}pw)8^#7bOc5n>W^Y-MdZ-xUCkUgg4g zq-B=2F$(u2O^R=}k(wm=QHX})Df<5X)Jb6?`pz+Odc7o+NmaynkdS51y7wDZ=}?YwC|mCu)d*2YY~ud{S*9;!k3&?_oqGt_g@zgvB+e_ z8C&j}nu=&VnNGoK!a@H=X-jMN$ayLhNd>l-PI`+U^F{G6V+UZ_=%yXx_u3jO)LwM5r;W#`+<}ieQ?Xluv={U#I*QQv$Tl+)Gg%+)@xLX%z2B5Xm4%O$m zg`7AixUr0D#)r;H>qmy;HQ-{0R6@dqw)FgTRd|@ejSUuG6}!zOULLk4%PAB5iu>j4EE=JfFPIh18_w`Q23E zmxh$nZogj1G_ijHR^Ad0Xz05^FtQR-(_=$Eh+8!a%=~bK*=qBW|NLdtL;^X#UTc6`m8BX}B%<>hfLv zB`NN>Jp9yL;(=gkRIh{q2w+wtELhgXlkC-5r+*k~(=&C(Cty~M^nJA#` zM)8x$D0hL$m=es-wTZZ&>~AF(M&z3Ggn_=+dF^+y(nuoFXF;@e-J`D1=~5ng3b4y( zzxzSuJ>nTY>gYcYh5`q{&jm3;?IPP_&uXKias5E|q2!N8gx3SB$PNBjAU^hSQ2T3+ z$hrq~Bj28$;-xx(Et|lwl_g7Zgwz)bj6>A{Y(;GbA#^Hg#*fMkA>n;||6vTchG`4) zQSlk%Htk{fzKjTc^S2bBT4=pE3=m^1vyDO^FxSH*d`|VR8dPxtN!E!QV;?9dUmwSH 
z_f}?ROJf88feV~zE7Mcxf1cU;xGSof_JsXe#!J>}pH5>-?0p=1VH^G3pXV5VOCN6r zWNvXb=rzm>CQYqEKXxAF?u_nv4^OdNoWSsWfEU)QUr{$(HY!T$l*qF~X1hX+* z=IpU@*ZEia3^>Rz@y6~u*;QN3bWL)Ydy$dDcBYA_Em5?v%JTq3M1SUS-IEackKR97 z^2Ns0@-peq3rt179L9s$H%p55;YZJ%UIlJt(tIQltdBdG2eq>mvM$@ij2Z@L%I9#2 zV+khior$7%K|Jxh-4Ph-8hV&^1ab&9fdROv1*XDO8TaLzB)soW-6~|rxnGbLI;BvZ z3-0>EG@)Kfz8IzdPrW-C3MaK}@(IIzE;kMI3hStiMCQS)zxfTuH7_JE*cf@`80q^q zUdUQE)(#}>j_m?8XFQwV^BGdQj+l$y_baKdIr$W&%X{M3Zu)KWGKN&x{R!(yhU1+G zHM)zN1`k)!VqE?C{TIAw*E(~J+UBmN9;qXhlUw`yvyqt?tpQVRTs|3zWb0>sa6gTG z&y1o0)5XZOpOr&{UpL3FBF<4xTR$=}`}b)hn0=4*H)@VP=Ni@;sGlYN5ySS}Xg+Cy ziwH@I+2O8q# z!Gy#QaiBJf0{ZsL{QUgGwC^ygTmr7;B4bSYYZ)@UA_S=&gRncYKH{C7-$0PB-P$6W+7qOAG1p>TX3p^KY*>hif{yf#; z`;qBO!vvp0ZYjV3!`URa!BS#{(4sg+(&Jv{Ml-J0jzC1D6N;R0h^}TOfQu1Cy!U#_ zgrbtLw|hxN+}>VjX$nV&;a;zSA2hM8a8b$3f1^avCG^!JWEarSvp*|&nw1)Zi&O^I z0>}S8!V%x)@*ZLM!RGlCbsH=uGaq&G8MiPenQsfxc$M4TlV&rqL!DlaYD9k47^qlc z8EJv0gyhc)Z8;0N`O6n%ANQ7O>{#cxP4K12cU@0@pgdvJ%?1XzU?0fnBtX);PF6@}oqs_%C3=t|#X%&Q0o--?CbQ$d zg^@cee&r(yWbiw3Nol7pcDdcBCB<*6>^(-|w=ZHfd`;BtcZMgF%5R#}5zmO;4LSNR zSYzb0a0)FeAkjK4W>x~{3+IQ(TvcC0Qs&S8OGdMnpsxyMtC!lBfw{DpJyob zz`b6-^8w8!ZSU{wLe;F*FDHtAuPwk_w13hVeIsNOv@zO~Yiq;Q04sbo(FiP+i^d#G zm>O?N8fTv&;OqzJNbYehVP5hx?D57XmnVN0folTRjaP8@=6J9m6f+JeWWKZCcn`K3 z_%GNxQPT`&`1yV#g=RUN$)Nwbm3`<&f|QzW8N0U65zNt zs{OOnH{jg=Dt)nIe9+Drlio+FLRP~@K6MBYoSFcwY<1bG8%l69GQ))-^XHp{+wWEg zZefRHt7NIFAx2WacSbYCON+RygnmR^NwCs#P7e~&6{_9{aeT13nfhiHHfId;3r$=g z0AHpd-{i)5f@CyM4lz<-FhS?}oa_$yXFKb3@is)#3Mxa@mG;#=_47w#HVLVrsezY- z%e$l3uQae~4}op4jTz@3cBpGuaY&gYUPlaEKuVdb(1?wBi!!2X{w)1aIN@tvh`!ip z#*+Ag?3>EpFHIhzkEia9CG3LqqtXD%!e$NQ;2au83KRhyW6{vf{jXuPJD?WQ>=ke(KHF4(ebJ!78Q)8M z+sF{!#R?wdfn*$scIj>Zrt7BrRW}C-4@)_gukV59EVN4O5Ciq)1|#eAl}E-Lm>UkU zhZjW3^tta{+PExQ==h#|k#u7BxF9@PG13PLFiL7vbai3*D*5j-z`L0WgIwURYOr5B z+iGE&n+ueI37&T94A5FzgE4zaLJh0F?}~0wRI;Q8kjbH@ofaaW@WcVqDmRD&b=-C# zsfP>Zva<)fD4gu9;0i)TCQ+#5oIXEgo9e=&mv=>n&7=@oWQkEsKm14pg~7Gsr~6=# zJa9h(;>-!*<#3yHa 
zwP)qkDj8|d`Z8YdKCIA!QZ36L?`_0{b=c}(=mDxMHKHkR{`McAb&HiTCVk3k2#{82 z{@Iw!r{BPI9&A(QI3Nj9H@(&o$HxczCohY1qqyZI8LH~Tc|N{lIVm`mp*+6@`DnAn zQ>=|mHj_1ZX~EI%j6R;Z(zG<)?oId8(-HZs!y0k@@rX!L&5Ul2-DdGM%X8OAnCh?{?Lw~CPV~%+8yxpTe0OTi~8~L+(A51MS=EHv;rV0Ab zf$58P%B~KXPh48rQREn_#G6;u5L0_n>*5o&K8E=7-!Z3Z-D^BDdJq!YD}fQ;FaEd5 ziTPO(7vG~@FIdBXo%=T}@^|)63EL@|J6m&hr|ekRWYOdxeAC(8e9A_aj1URkd@GI` zhIrk1s4p!J;CSI-VT@jV;y^jgGRIm$&`^_(FocK-zaa~-Yrx}@Ub`gkD!v2FaaBNF zp&4I>>4-HshqvRwbX-~qM`p*P)Z|L-NUhM#k~_GBm}1(`Rvrua({{YVWy7E>p>&`P zT>JZLeLi_pJI? zfhkzh>Z=P~BGoah1@$D5MNxbPRq9^jUbmVbN=r-Ei!8o0+qSH}lI?{ssPNePJQG;+ zSwg&|Ug8T$;d?gUbo|l_vs1~}N8_15w&-R3&X}%c?>;sEm&f9VgnNij4D~PDYe1z(B>M9PCfB891sg5QpyoJ^q!?=>bRUM-Ni~ zABr6f2sD=0#R&eKyEwsrt?%?G}?C0YtSr^F&SM1h7lpiI`;&D+fEu(=$G z5$!`V&%h+AP(5(c(qABQ9M3?)hpVgo_|8mI_~qGq)z)+b=;xTix99Dn#-InZq=NQa z51uo%-M}m=X1@^CDY)cbf#u0X>!6L8=z6l^XowQPRIhozoWcLw`|QMJ(4<&CI_ZBd zV@_E;Fmz<+*W;q2pt+yMOVZcN8r3>9q(fBjT^`zjU4cMjD~uD0vSH*Ao~u!%_XQ1x zjp90+i3n>bm$h4d;N+#(Epv`1Jb&Q10J~e$S3UQ+kr|Fav)`l7WF5ViW&QE6)zVz^ zOW*Yr3QX&5v`Cqto=4?hFqUy7pnjWvZNrLg{=-J(+cEK)W{>eKFA($b%q}~-(<3IX zbzb>WGCGa52IBH%Wt5yfRKuTGykhhGD-(phcXcUOCoMI@=Pefa$|)O*V=tUL9PIHLGjzZ})5@a&6a*mqQw zfux!k+Et(4^(UN1M)hC(Jn%lKC2?>5X_0pIq=6vc6c5}lf>7)NjpkP7VUzSSjO)fw zI7miAswwuc9_fE-6kjG<)!Qd`JRd8>Zq?Qpdtj4Y1pkRV?>b@zvHm{SvJr5JA==GM zg1!WfN3sHu9I6RgqQr~kgoF1KBE`jn(ubc#+-R{x{L*z5JYQd5O~NhqkO>#Xhv%&` zN0kmg?*^8e&xUlKJIj3U5eZHxbxWDmw`Ew|!a1T75EmBlux2!Txz^p>kl0B= z{5GXKB(;z=Rl3QfWo0YEO(Zz_3Mikln)E)3*61B(NM$HqxtExEBZ?OWm!7H@` zDq5?_)eHpF3qH21k~fi0HSKfIkx2Q=d3;Yi9)jW=W1X7AHv*+(_&@kZv)bR6yNXu#*wcpBE_t(_{qEqSoL-YL!Bi`l&o$)!-0M4RU|F_&#=~FKYryfv zs2)d~g7FvsU;Hiw3;ir)Pq%C+o_j}V%{R4BOQc4))S-m*u>?J7C(X4^<9Nf$J_76@ zMWslYhoe4tsdCUP>w#XDp6!ioJm5qZF?liSjWmzJ#IZVtgP}_j2ZC9&xvW6}nVt-5 zVr~MU@cY7wg_H`7cC%h? 
zo+;t*+mN*V?xz6diI25;Ri0jHJCqn55GK9zZ-GIGEpmU>rx^g*0S6QQ7=788KHw{l z8$ex}6UHFNglmuY@AKNk@&h>SW5Q;% zav-5j##KKTG0kD-SX^&jRoY6-0r117tpjK$SL1`x0~XHblr2?v=EkPd`5F@foI`)_{=F=UXG!pnSdkG7b7h zI&t`$qGZZYz8mSbT1-%Zwi@1UyWf&eK$5F-(nIMZET>z;Y>ah#|SGgmNw>fr6xD>AFOWv0m`~{u|h^DPhIj(;;M>3kT zN}uf9J%G0w>u}|Ny%+3^ZTTQZ&pRU!#CC!$it}$-?CL9%K@B-j0Fvr4^QR9k!JvT) z1i~?3Kjk__>3YTzRl&q{tbzL&9EdRag&Qe1%oK5^qp)n;n?o<&27vi8q<rlnMmFl&+k_J;nOa-n4wq%FN#@`@+4+WuBCSvY9 zo5O!g@Tx?tzS;MK+gHG8D?{RVoZvR4LrrX{p50Sv*Y!FXe0Tz)W zyqjN89KYh$=BAPIEQ9;u@^5|QhuE3hs$f3x4e(5eCFdfWO_wj^z(OlW>E9qKQ^X>< z3VN&8Cr5i7;C%3K^;cCq)wCVORtH~gksy`$Ygn%ges6x2Dp9gxdH26Uk&8!EpVgCr z8BpYnMob4&lpS)lp>diFNu?;K59!-4kgP$6>lA_(TD$baja3*ovfq#u{en@_UB=^N zbVMj@sqAM03DQb3^ePRmtz*yi zq!iy)ZA#306ZJUEvQ!(yff zI&SBXKfk%eN&f~9Md}Be{Zqmw^z=mby&7RL7D1#ca|Q6e6tmQlw04A1vlz+Xx0hr2 zEwqdX=l~vyq7ubj)G%%c;L~~!PA1LR?uEJ#|X z__o>U`{PY{t((MZi_3k*(c^`Ue3S%G88ur?f0k|ReO1Oc8OrbKiCFu>hw&-}0MWNxe1&>gXsyZD%{Pw9 zKS`8bM2tejK~si{Fyq}|nq)Ey$*hR`JI0prW;>>CrY2_v4y=%3vhFS~yNF%=9m{IN zmt);{J|-sL9R^MkCEv9;9;?)qg@j@9l`YlI%@@$O$V+$O9YHIWMK4BwRJKhM1C75E z!`mw&W>_sJH8nNuBiFAHzDO;Z9iOan`>|AR=Ume{24k=fUvtatty3q#tt<9vB-8Rf z@y)fnG1TW&!Tlt~UZR0^I)glZEnIja3g{}??uE|8H?jHk{$EfKJi)a6?8&B)RX6{6 z(`VvNYVBv&MV2ybJft^t4X*%S zK-hg`{sgb*>L&t08@9(uDL$uef%oaCWpSoo)O~+1_X~E{Zve8|0Y$M=WdIz|eYSZ` z$I#2vk{46`BEnuL{9^eANA82b0SVz^%a>cNMboK2#sFT_I7qF^;JHm=q{6)f6s7VM$x9SFle1o1oExhZ0P@K*xXLLlCNpgL-*R9@8ztA|K z(2MhmU#E6=B3&oRh51>$L8@-iviV)_wK}qxA96<4n|ef(rk%Y%4~xACZkxr^&@)hd zCs3m_T2XGs30qklpDJ6+E1l4sEbW}<9A5G2E)gFK+Rz6z*WI6;G~E8~Ov6%Z*ZN@A zK_g;2#S zqM$+TfV!pi`QL+=chU#Fd{U)Y;fIgOFn}sHB}^Vb<)uR)!3Gg;<{I|#wV`f2BYE0RrV-H>Da6F>rL@(psnUlUkxKZe4a zwiYig8W40%V``#Cb$=>?Ozdq`P;s*`u4YkysdAvSym3&gxpDWwVMF*oUfGMYK13T zf^f{PkV|U`K}pv3Txrw%_Em5&n8}+{Q6mY8N4R=RGYh~mjco%cVu4iC;3o13Z=X)s zCR=y?FOsmyhAkq`+K8BQhD} zlSE%9j2V6XPN8qj)W6^TXWB#GX!c#wgRcB82j{_NJ86C`7V%i)v8iUeg||;%+N-o) zyEt66>wN9PXH|9Uu1@+E`-N&AYdb^PxCQ&f*X6aCH{Dqv!f9q~vI8^nghM)_+=uE% z-TNP6DBAd2D_dL?ixzqeXj&dfGchq~-&1`z5H?IJ$u;;O=MNPTWPiwx4qCaTCIP 
z0Jh%Umz374=!V9vi<6aGXX$`*f8xcJ-&eF7v5m$wqc9O?-W)MYA@Yu#DWyfL7G7Id zVHtAgfhNs!T6UF8w@l(@Ty!@nA(z_9Avt_j=r0eDzMHT4AOh-tS@* z`V0&NfH|>q4wz1SZb7K92{MfxkJ=Gelg>^Zok5H%PJ}Vz*m$>{MD;D5BbgNU+QtQjNc8Jo$5OZMVyW`7j(8XIeU`$SsR_~9MeZ;NuTdL>9p=j ziL~7eBJ+Whf?1k?vw_fW0XP`Hf5jPcZxK`D+dO9GmwKY&QLOh9^!T5zsM*5)=ntnq zZlo7@62{#364uL#*Jkbh1hPn z!VbEnHF;CkG;+hM0KjJ_BJ%E|si(IxF9}m`GLTFzVHzt8IzQ>PmlV1_3L)0nA42lM z7SjF@eM4eR8iz|WL!HJjHwIhAOV=b(4lSWcf+3!?BB<>DEd#O1ZIkNGPWPl^+o>Oz z(Leu{if3B5KciCDOx)>TQC>Zf3}LLkiyjKHmrn9*&MQA{K9N}Kq)uec;}WCD!5v$$ znn28n+RZdsuGpC@=03BvPVBl6T`I=ke=*2el-XZb;YMX+d;2)^y%mpheg$?y!FBT& z`!DHky8Ll@-xF(mmOAn5H^o5*!fGR7RoYLhbZEuitg7x*D&QU8ddC!FczJChMAX;g z4c+l!^i6Rl=pF0y!lg(Hu$avyU2l=~9Xjgl=`M+1cuZA}Yq@fm#ii$-fJHw!%{u^! zPiBlI+jD^cKAaHVfEID7C!b+$SH4vO;?qhwwCjXn>s;+-n3+Uf-aZt?1|Ya*N65xZtqC5F?ud zEnsHB6r0uu6`IoOvMUU{#9}O_bbWssbEvyx|2h3{Kr#rJ=I6k-yLI_aGClWSc3R98 zy?Z~c(yEoQqFIduFo?}lph0EQ)B7v4ny+B3s{9Mx|8okwmzkRWVP-$q?$j1{*_1P^ zn+2|O999|RM7ZTFpylM_5`UWj$NDdi8Z&@XViQKjuL@e&PZl+4B#1zl!RI72pXs0D z-F%5^;M3cA)1K3L??J>P0t_ zTqdHS)zPEnNqf1uc^Lc}yDu54k0x?fXB4Kv5XYNhuLb;T_b3mHqMd`z74b>Uk`-LY zvK!68V{S-S*H_2KwrZ<0G9>>n`o? 
z7>JKn*v9G++g?Q{kWnBpo(rl(BE|4Xx+|Cmym1+;B?%1tTH!{V17wO@GF&?Vkpc#Q zvKCnoE}_Q|bb=X|y{)5ma()?!B*G%_-~JC5V6w`L%1z*%B(erVoj*D2ydY=KfRKwf z_ktGN;9Tthr{F8Qdw+L)-Tt%w^EPsVO8$S~;C}!_X1`WhUsP52b{O4Yh=^Pl zvj1&e_{9vd3+8fN<$z6`x9Fc$khU3TKKIYngP3|&2JrHy?tVb)wAent?<7S65l!ni zz&-!{oicmwmF!QT=E=DIq%B6ECU4}1MZcoWc-ec4pTHs-=)1qKwhfMO3d4>&oLv36 z4+MVI1+KFFXR*eR(p@w1ZU6KJU>!2aV`xaUcErD?eNUv25q$;aTsuImRxJLFv^76K zX*514^8YMV%&R2ZB2o_|^>e6#G@1h#wweNHqD&ufDKZAwmGHy-#qt3}ivobR4a}n1 zfGy=5Z0Z9~m6W0`Tuh`d5m+$*gR{bvphyBYEFfWXpZ80G^XTvTk!Yx#n-Duzn{njR zD=w{^&weQxS&qX+m(xSlUttX#)69aX2u74AI**mO_dL6V{X+K@MWX+_wEd;56fLeo zV1OwDOmK5&(-v_C=XnMcV=Q7eXj)!?JAVHed)5JXkkyqO*pirv73OZ7Uj_b!%>HEo zViBQ9a44tcHYLC?S|0{AX*RiY9dUQS9D?mx&B2FER_0<}3xdb=8J%#>yvwx1psHZ} z#jHv2fFI7}bemBY@|I6tKwxVh;efmh47nz!@e30a+m|R72gp#KH$MIl3NYy~`+1$P zL*`=wZ0nB*X~ge)t1$Y~L7f7nDc5oDPzpo+_;~p8BF|+2K=uv~?EODe3_C0)JJNXG zr`5X6u4GJu`{!Cp&Pz5E?W(-EL zqpY(LD%*4ht25SB+GSmzOFh+>=jSlsRCOL& zN*cIA9kX9p;o%LF?|wK>iBgQpXq7)ZHp@_LX%NGY7%6jn-g9ZPHe{j_}sKgQ_PDv z!zN{vKoneOU6$Ej51X8~jd2-*eZT>=dmI#*${@`dI2#A2?Q(JAF+dUjyT<o^T~;EyPLbMbzcXAaL~|=!hsn1!lj)7EEUI)$PK}6E&0} z|0EVpB3&s78^#$2lzCsqZ@{~C8G{)x(D@sgln-Rn=}H41%-LO{(IuCE0Gv1iNJD?N zhTP_8D4aBEFh*l~B)<>6i82f*GU3Vhr7Mu~gh&hqqpCcznWWuVc!c!@(5{9`vEwb( z-3AkfGXpJ})xRdyk0Pa5a)=2ir>l9in;zjKnXvP&e?@KeM9zlzm$)j}qJ6I@B zz+~}hh{lTn$?1TrO8cWo%v&+LnLv6;_x}IyXEub%%+lOuyZx1|ukvQwwRgCrDmc;6 zSn{r<$xdi#=EG&UMdS7+JnqQXz)w(@Z`D&}TB%>!R)=3qMa1mHj*3Z^^5Ryc0|~9c zOV$@!`34U?y}Yz<9ETuTc_SMm;Lkm}o%-yhBsq`sjf1JzTAFp)NmeaWouv?6^{bUI zdL&eSM{yqzLuK#)8AylG{#BMf^X`R+HlD`jlvlV4>q_k=QlEreK9*9{@;9%V#)b7y z#r+!X;&>(YJW}JGM7ZaDu_YesH~E#^wYim-v$_x30)l(kOQMNbj4Ouum)`07KV5%S z3?|Tg4w4)EyUxl7F0U0Mc(+W(!wP>d7W9@XK3@7W@{SxFzyxmvs2$A>n@jJyw*?;a zJDuV`DE(E@6iNMXly>_pbq*XIP1m!)`j^Qp`Ss7EjqLIdcNp}Ry?27NxL@-cbq}95 z?i6wh9{wckVKW{bZZ7D{xv@3+{mHWb`QI%OdiGoGtZt91BB_NdXtRue>|n2{FL*V- zdT9>=pmK+ecyuI{ye9=Zj>vFH&MALO?bEj6x`($>v$jeJ4$*IlKx zEEz=)ipHzYRL;$*FdLS1z|8VQhufkpUnjh1W8v3elG&@tVZ*)jn7)FuM>K=8c|+Y! 
zw$zb4xrdQrS9636gcEJkvlmZDZ*#o9b?c`~m9U;-=w`~GQo82g#<+(9IW4cHj><>W zDg+5UM*sBrQ|kV%u1w9|ES%%YN@lE8{7el$$1G2H=gF^EyeDg)5g| zh>y~l>2mI6&(dcR?H{C{wKtc`hgnY6+Enu_h>A>1o1c~0&GtX5hj{VPWTC{;+QVfY zKM@s&aIm%($vFWc@@FSR#sDFcXDXgsMn)E9toGlJj4O-!&e)4r*UeG!KZ>5X#`7;I z;UN7nD>mg8`WqM)>HfXQTr? zx5Ab<29NdDF7Z32Drc*jZ27fV;aby*G>nsEZQJvT%tW&aaoIr=3D>-d%_rHCB!s{8 z7)~2b(%p0(6@IKVVBd!OA--qvwPa!KrE^Heo7eHy?1OL#(N8*y{p}cbHxeHdKPX(P zV6mOJ7}BwuQ~kcF`!T+k>Bp^8ze#}yyACYufff8NA%z#DCpq?*B>oR$ZyiPzGDoB)F6RP znPs40BVxGxKn^C2GG;v7PxGPVneohi>?)7l@%TQOXsxrZclqP{;X6lXkE|mpq~o?7 z-wn3*Q|N$g@%39tlF9)%&V0`a6QYhW*u<8Mx#OD~Bs;?MDWkAz8VKTUrv}o}U!doh z+2|g3Qj;N7xFm|}28s0Y!EtgsC6bcwNEoi5jHt33^%X5fb(yVJKm8Ld@pM{*npU%{ z!DFc%<2jBrLQwWC49??cs%Mv-@4ejO#mf73&`-Uf!BLs3iPSnj7bXQdU2t7i?T3 zIgaYc**LrqWO4i)syt>5X{nTCuS9rmHP8LcVDU@<&oG+o`M@ai?p?c; zSTaGU{`Q+ovj3{DDCnV-N-lkMlMssv{^UW@0PReDm~FQ39J@Lnk}&-uMi^2zyj zp)&~BgTbKbkOqJ-`yw^;I>48+JQHYw-=QUD;}c9gA#^**Ev9Gtq}q1nCvC>S^T2M7 zB<~93w?w4!Z6n(;ZkigBlQ12^D?fP$yUj`@*GcR5N=5EIBGoE%?49Ee86fcVC>ZbR z1plY+d#zLMgtKGbKl7^ja%o0X<*uiTS@k1p%w*F}drjwXPd2A~C=0U}Fzpn-EFfh4 zh{BC&v1NGT$*E7rS$NSHG-KwYF3d1;V4v&l3;(;qEXSoiV=b16l8gQeyUiCA3%4lt zn)`dF$FR0C6OL13&#PRqw)5Zbm80qs&6Z3Ulz8cAajvdZ@5hLnFUq_8q>~5V0`IjA zg?+#GTus(OT7w|v%YK5`)#i}3z3}{tKa_@6d{MdiR+{)?tJ_D%!Okbcq4FEP>DF1S zbU>>tx_`t~BF5=-B}m}ITg&R7q-t0*N8|i__^||ni#Ce-F5eLws<}dl*arXk!@TRx zMjY33F!-2+fV~#k>>5Ppt&nOy8VGr3HW1QG%CFB zbaAcVv)A>hIIEM?O(X~Fv;N1`PGwH}^J(h<`>9>MN8=!G|BS-DupZz8Oz^{uBA#f% zYo>m)zKc_O(>v0;okEB%O~zb1w=P#T>3iqQ4~p~CpVfr+oNaY`3>ts>G1rp^H(d5_ z-miZ}nAvP>Fyo7bf113h4>{<4Vbvgohm!Aml(E@KnGsM2Ciyrw?r#Om*~yU>5tr z9m`Z~Cgzo%&>%XP)VJ{4g&U1YiPYF{UAA#PF+G>s(FpS2Kdoj>lVSG0G8$j%F2#hO_M!#MM0glIN;P!sC>S?6JKSOftQ{IO+csm4`m@F-=>&ry)RMsG zV)`UEqNJ*CifNI*39wy64hKccvR}&>JfeLYO4?*@^o%xL*LyvHI2H>ZPvas&JBE0o zuZ=mo`GvW6c?&MtO1Vqy?@r0J{o31mH7weqAnyYr(V0sV)8e?V* zPBdb~?Q#}rA z&mT*T1#YfL8Bn&)_JqFd9`DWH5WIZWoOr?!myem$|Kd5)Sz;xd@vJ*1vLRRW_+?Jf zDO?ey5Okmx^~c)Y8I>las$bk#l2hdFI8WoW5GJ^;;G#o6L#uUEAu;1Iq4V6v;i%Lq 
zp4B=3m0B7|UN6LQtO*3nyutu6Vko$*;}kTN8binLAz~A<)DXbDT7b!! zva>!+cy0BgRHof~!J-VO^{H6r4+B$iD#*P3$!7wJWxecXcn8-tW14Qjp{7yRsl5 zzv~EpiNjx>Zrz@Cww2mr6c6hceXpN=3##p9Z9=*QMldey+= z-L7qH-i_??i-I(a?-pM`C3kwHhRj7y{DjXJ7@k&0`0L-a>R7&=%t1!>osC0RY4{uU7&flhf7NWbC5evZtr__~w=;O#bqUyfp)kr5n2Lr!&Fp$$^tjXy3MJY* zvu!bHV!Oc2tXN7<6yNoq*CT}-RCc((^V#yuqSc6ZyM&Q#x4swkmqP;4bcJY^kAxo( zWzLK3g{nzih`o!a$NIc)2Iyf{CDd$6ZIhKV8b16I@C5FC3rCXOyK)XDel0c-JTZ4$avGkxNE&9s~?qUj|%8+Hv(@i zQ%1MOdX?P9w0`XrqBq^R#j8;d^auL|wJd`^mOEx9Z>Y!V;Zyp@CGY7zryxqc6sjun!Lt;v)oi;aiR+aeb>Cy3P z0Y)sx(2xva?r}{$O;g)d|Ai1GP$}+5%mHX<$!rCP2L?(llV$XEU_e_L$TnblWbLwL z|6f|g@3>a)D_lnTQTFgL&?XudFj1w!5wL%y92d$g(0vX#{$W9LUmV(&&T` zPoNUPNOm!KiAMGX$}lPXAf91jI}J@I?q1P*hCIYhbz*hYyGUk5^;z^88YN3r0}Efy z3EE67P?uN%jV7@>Pe%uc!c&B_to5!v)S;PgoQDZIA2MtOcPymgrdQ;izh}Co3u>eKh(vF)nqd`XoD&gLmyj1Ofr3vMyE3etnuM_4|PHh8NbpZjVm&oG@{} zIK0KT9E$Tc{bX(phkowkO?zVYDPs#MxOMTH+$as=Liy|xn9qCKH(oKXx~lrVS0*R+ z)RPMQ;c%|1GkHyieuy3zb}bhvX7A6v6WJ=^yy<5(#KS;#o$Bq$lUNUg#Qw`~Bjh*% zJph^Mmm+%bJIQBr6<0jFk`%68_QA_i-NK#kzvEm>eQPw4QlYss6u-MT=$5m|RUw`< znGG`}wsm{!gN8i}cU}(qqU;zI=5Xnc?OGcv=P?#KCa+z8B9`oGL`Kw3K6u=3DeJ;f z3dcYZhFJ7aD(qzJ%m*1l=}g?`$}s@;M?gw^qx;ETrs;Z8rm=3{53|}|D(sj-1MI~NJiRnor^W3x zt_(!~=TeobZNNmv@v?W~rj6InFOPVvql#Yfp!I!B^VlXjibm~sBV7s&Q5sYJ=kvaornOtpL%?9 zGP%X8RGqk?{Zs#rGK{xVBBIcjUV02;lpjgE9UUBz8~p0W|GR@pM{oraf=Vv*;i^4@ zY1Exg4b5_x79}eKeIQoKPeAgJ@$?e0M`K`~_c6w7_)?7OYob}>A7!pm3K;c(PB2OA zY(L4j)F9|(S1+V`_niw~@Rq4DN~t_Cm7oq61i?JDIF-#p zcNf@-%maBv3iqp}CLPo>zysv4M?E2NkM`EM^eK9yCzh=dSW`Ya{MnX4mEq=Wqb&kaj+Y0>qHo3vNmRc+wYX-&pe| ztJsB080K6=d^gzNoR@h`2?F2gxfUQh=Yjix{sn*)-@=s4&-A00Cba4=t&o0TI9Pbs z*UhV_bG#~Q7k+9iU4l8B0sTFs(WGXKCb{RgFGy$RzJnpB4AbeoHLk9GyqH~?!fqJr zhkMooz!m%d^&OzcKn)%RksxjzEZ;kS3)-or67szy9H84|e|$9!Ufz>8wR7<&QWq1L zLR!+lYUX4Jd@zUMi!Ej6ZOoJk>V0#xDpbN-z0)}-xXnjdK&G5ZIDx404xy;KD!x08 zCGLMzb8M+qy}webF84B7!5NJRhVO)C4K68ZvsO^tBL1fAdEE_KFfuS6QXfbuy1aqS z_7*xiI`iEbp;qdJPk{laI@WN#e&|DUHDrJ5LCh9)9!(MYJf4`@By-NiNTK{RcYUhI%sLtup 
zW3bWoJMkg__dSxgA3hX)#0W|YDks7#Lp%AOeMq%JQrCl9aF=>Kb;_(#(Lz~p=%EH> zOA#jKQ{j~AR~sKwk4H!6fjiss^$D=F-M`U6ZMw64-P(n2+!90T)AFwHOk6EeH0aJM zcTM9I0EPQ9I#rZPpFwbq*Ke@mwaPu?yx-3gQI@dcG5h$F-dEtFcH7S^+vm+rlnMaJ zIg|g*(*(ebTHv*6Up?>qFja;gXl$=c9pQXV!#v$Q(~b~&KVMr(ou;xR0Sc&uHg@j@ zd^_)nHCE+lr(FF-%L7ZusGF(G`2aZubLha`rF+NWm#e^kSH|#XO!3z={CW83^+TOH zosuXi{}a0BgEF*WOY_ub5}!LAEOTevqBjU&mwXTzZ*r0Jf4QkdXLdOEAWu|6!%LWX z@1uMS^Z5i&Cp^Ap48l}k283ggVuOXmt5>sec8@R*ArPIdzbYsTEmgK#dY={Xj|SHU zDk(9jxA`2O=v+$n=82~!zIVq{WU9e<6 z4d1%U#|XOr2uR+mIPP!U+FGQO4rvDFA4b4(Hm;{;n4rb5feXF}0ZqQPNAmY~%g{Dc zQF|HdGhUt$A6UrM|F(O80O!n50+>}^?#vQ5)2!G(%|KAnHneFRn}>1s0#<=sRLOkb zl^t--%-ig7B{i?wiiqX4kvXawcN5&d>Kel|mb^1D_}Ta|TGqPtZ1ipQlUG1fe%&ER zCRx~Me8_6}!!@#$YfVe0rmyeLF_Jvbl8mG;=5`u}zh4b?;rEr@YpXV-nttd3Ss>7m zNfPs6an>#j<-|Z!UX=i487T(lG7bf78I{yj3p^-*W&ckB98#Mka3Ym(xu*44+mufI z(aG3jAda|wXB0*VZeHEMLdO9rct^xo`lYu5rR1{CeZX}mc+dqhfRTlpXhAYTBc{tW zi_Zk)$xA- zr}>fhi}uf^aziYQcdNayc7ViJ5_hTgfnN+Sb6;@YUX=^(#FeHWtHtT*V|*9F?9Epu zO=?D;k9wU*H@l4UJ65pmr`rDJ78QYn)O=Itp(ty}y59?FCiEnfSh;vUj zZJ611e|P1Y;d)FvldN`^*YZWM+79kFAB=-e;$XnwIr-RltWR0^JK1du0@+1I%+`P1 z)}PC6IIko6E)7LCP#QiwD4Hy`eR)f$qJs5lqrVkg6{ifYO3p@)k4|3WEIG{=YhCCO zP?1-hd>>{wroxf$%GKgyKPvJs{Zao)OkkJ2dbhrprcl52BiTLM)lt*`R-gU~X!i-P z6dHPMYE~<>H>b|(b|huFL>Q0RKW>d;h%g=U>BEqY-jc>L%B2|APFO||@4j((b8*MqJ|8lUjQ(O_(2jFu z%(9};^u^@jVph^hoR(0X*3*Ck>U9xMZb=_LKPh~(8r#>eH(nRMup^IKVR=tJ6uaEC zLl8yWX2Unqdq~#JB}=I?QTYR^@$lQSa6dQf$<&W+!U*Rar${y}6L|_et>)@{<32Wx z$^3t_fb2tRyM<07wsBNig9VrAi_M+xQ@n4Yag&mNP2-dzc%;b>gcK zX8gt;PXuii9$n7B`h-(|F$vqU5N)ILpwfO#b?`h_9$cW%$xYoS?YbeCbO%}iQK46{O1PA*$FACbwWr4Uv&tmD!_rg zfT?(6Jy7Ua(a;o8oKD*vq(W??tjnu^)9l>BoieHOOTNy~ugJVAwhM%lvaUC#@Yt0> zwNvH+SCuln^Di5Y()#6GJq9Ml`h*u{=F$}v+6mPh_0;`(5pSBxoF8@Vzq1EzsZ(P$ z=kY06$oXuGIQhUI?)i*$_NdD9FF zMZ~$^jwI;&j{{b{W^aacOJF!azd1-Ggz-KbP~GxxU;Hb=``s>oEs1?-dFcP>&@v+1 za7b9@ncQsW(~T!|1kun_2>fP{fL3i`UB>X10%3b-<`6{`wU$8AG`m(g9oYAvy(7!+ zM8d=<=HQD;TLi=CI%FcWw%Z&Rar{O0)%xz6qJ8G99Py=*3e{2P9j$0^QTO#O(L<+G 
z?>UVhc$Q!U{-UPCn~Farw);WU>q50`g)zWP!Egnj#f|h3sMZ@v&6@!nA0dDHFC0{g z>KoRf^Kn5XpOr6*k&=dBvsoUCUKFeQc?LHNXpcDY@EgW*KqJhKNZ9##N~)x(zG^nK z*f_v3L-oB37#mGN73ThE8|^J3>)h`R@9Y3ej?`s%*Pr0k)Q;xhW(i|GxLiv4Ub;?$ z7CGPx?$&ncb(R1PC(~bIbFP#1UEQyI5AK`s9L%4M?{0v9S0rG$6$0W;d0rx(_kkbg zka+3hKN{FyHBLQ_Te7OTl-q|%eTU?Q2ejtfC=S#1P|SJ(DB32DFIk;NZvZJO@cd&= z+JvJiqQLH}M1U3!Fz2M+czfx+cYclQNwu#->hJRHhh%(^!F}y!566zwqeK%VvE}BR z653IbD%A-j(5&x@Ehg?|XjO25bITuLwN&j8HXV9M2n>MtHnQuJcy}$8W@63XL75+V3a0TZ8W9$g!}OpC<!+5rXY9}aQ2bT|+fFzrge+HXpXxOE3*!xdX{ye_7zO&y^3 zL(47;0HnFDfy_>Q^K%0P?t;Nhft0fTS8lQVd&xih(87H3Dya19doRht4OTs54m0Ne zhY%XiU~xDo|267Puo$O%J$k3cQ$vqkyd6}b!B7`SLjz26sqQ)ai{Ln6xGj3*%Yg}Z z*7qmOM{u{zRR@D(5@Dv+Ss?D(ZS62kH`Y{@`{|>wa}i4l}S76oGqy zY?OJyttjsoM;6QX6y`Qqooa|T&^lKpQ-{+(&?tWjrzIFzrOXHyFrQp`sjmTXn4RXP zFU+ge6ehwGP0|j?96#%GP9bJ!^%$4%A1$;0757d94ZN8cx+AaC9R|f$%E$bT@lzTR5H-><$V2^OSe7oaoT+IEc`V6lw z(`RfNx6x9c6cF&B1X7_d7SVW5U=MDY3@Z}g>_jv^`DMX`4tOdU-cKlR>3QCH@x8Y9 z61eNp+_jnt0?VhNYhHhCQU2?JX$0Y%nZ-X9i~15ssRJGbbP3Me(J@s6u~0y95PIu% z57fP@0pTGgix8CV>znfr>jdqx(NjA1*7eH(MaVq)BO9JMvR9`Vb!CVI&KE_p^^iJd ztQ9)kqWPVGwm_5y3b9NSYGVUD#(b!LSpdLyz3m2NS1USIaXH#YevAeV;MX|5l0yjR z$VOeTN%*f;FZ$1J-uI#38#8MUs_?Fihm2^$N?$Sb=?@aNzy#(z9Xf~&!&u`)L#SNz zfy!BD<811Rc5FbKM8j6LsSx6WI?4XL+u6zctw$9dQ(e(sU5n=-^M$evGpQEBZ7PhS zjUe{^M)vL+=Y!PSpo!5py!e}0507L{|9Z6@Vv;2v&gI_Qf2y+0#lHK5bW({jKcMgU zRU@+H)xWOFUl)x%Hmo{p~`r=?yaY)^R}o{ zITuy*0&@yXR{VWEOK5&pIPxRW*AO`d4r6@qK}kYOS*T0bY!g2cZygl4J|yh%zAZPj zD@#*90k4Jr9Vz!mSZRvCE6oSO2jDD+1JGM4(9@MFnfnbG|bBejTO%v(|93 z#O-nVnxNK0TSjuY9y`4PR@WOuKUyzYfQSOV3}NXL5XP!z%;j96v4D}d)aj?IfMtF| zG#@-IUy~3#=HJU6uG)Lt9(Pl0H(`D7JwoulVv{pSe%C>d2-PK&+g&NrL?Vv_JTLsM zX2s|*n%CV|Cn0{?I;sK2@GuxfFggjB3h=Unfn`=IfQNpA|M>_%CPX=|KQh}F2)k2_ zU2k=bp9?J=yEMTXzXB5A>!$@_xS34QFM=FwlO&0EnKTo?8KSnB`J}g731`F`W{@kp z94vGH=h1n$vWLr$#S|#3+0k~FoU~54;~Uc*LMM;Z9J2cXKuQ7Z0c@{of7@PBoz4Ts z0!6ASV2l&nn$)!oK?4^isTRKhvv>sPz5fanVA;5-UajqN*FtQ2$A~E6$M`2{st0Jb z#o9}>2_-s9P$ogQ6PQ`af8CkxG_PZ90dG*@UV-SNhus@QEgJUP6$~}iO3dO?Z~fu3 
zvn04Em}$DuG2)FxiU`e?2v9p!Cko~$!@vtS2m_IPb}Ld)MVWMXo%yqTuelMzjZsik z%RsGXDvvW6#-!x_KQWkq7_(ZvOr-k2IcAu$^9BoE$I}qu6gs#$8V%u|DVC9q1VJ^r zrTQ+oKODV32%5n+_96tqAo&Jg)6q&b@rqpSDs{9=(|^|Z)<&gf)vjW|Y7L|4^Go>x zv|{x`(vm`8Vr$r$bXMGc$iV&2bK29M_MT;8R0ZoijUV8Y^cE}#eB1N?j}icFgNz=w zH^@CgXX`(wd+RJkC?*X#oH{OryO_J&V@?mn>n0oR28#?${Tp-9mz>)v&s@l7gdcpA zGkjI%;^M+gNRc3fE~8P>Po`N!J!?>kc&lYML=8xas=R^UkRfF+Hmi1v96NB8E8R}^ zm;MADn8Z%~nmdNRosbc4PD((>B-nayAgTFJLJhwRKDr`LT97rV4fQ=%mFDNAqp7qX z%eghtfqMkRwYG=QLbMYxau7=da&qg;izUeTe`9$BSFy6=9YxsU)D2s1vxJx7-ob(A^5=4j)Z3Dzx4z5VYRu1B&j3r%2q=J{pS2Aq8i2y zxh7Y`$^b&tsMPkQC)P2&VJ&5&>vNjGe)MieH(kCDe8F7=qKquC{d;czR|1Ven{V4G ze%EoyZE3_-wpvH!GZ&LL4)7Tt)W5zQbH^|QuugCjzHGnr%gvc?d<%2wFrk}4;lmL!>yhNW7}k5 z_Qf7R!m5cr`r?XpoT7IQVO80{t}_@a;XnWOcdVni<+dfu_H~|iXF89STYt$n2`Sz0 zgKDT_RET8)Pl-^F%E$ncZTf$L(Up`SS1_3!iAT>A`V3}mVE)C5w%|?GQOVO@{+}HY zPj%CJR?v{s?#*X^3V9#Y+Y9V^Vajhsf_7ZzPez5PC=QNdwpdPbo%ZS~;-eS>MKFSa z2E9wb--ZT{v(*Y^z(NHLl~D6=c>Pk(q0sM*fJk)<~EKd%%A&I!1 z3zN?;EYwn#SqbzfSKWEEH>Z>=v&|jyH9<=qb0{7cmQfQ5=}{Y%I%F)Tam2ScK2_k) z(u40a7E*xppPweCfN^55DmQM*+$VHAd=3%LLDa@X*crPZVf|j#s4;GsC3@d!F zbPPu-yHIQf8lk+LAr4Xtkyumor+rZ3PJ^~%&UQm^Rp&r$)(c_~8Z)gnpVwI4zdm8- zFeqz2)qZrY`qt=_-@IQ{;`#yK=UOoj#`7EXU|!mnTe&X7SRZPn%78%wBW9>DQhkIW z_-N7{o<2_&w57|cR`40LhO=Vm%i{K;ZPAB6Kl%61pL&0)6IC&Cl)kKDt@ZLCyX?B1 zq>)4FhmNfQ#RL85*J8Ugf(M6JRtE~xVCEN=W9$-Mjb+q&`7pyr2gOj=6y zw+9YzmNW1RNiYwoL3T8X)_pcnx;m*`f~LSzXpR<>RzS8-pT$)a{O2L)!cPUG(1O?H z5}oY<17cS7XMD>=q2%`CZiEw|4oN-ie}4}wzuOijo70+&qjgsaa=$~!^m6e8?KjD~ z`xV80%DIZ#n_|M4zES8WJ||;+$dm_CP;#qg>Rt$AWJ@CQzJHf=>b6_(>Y>S)zlKrr zTh<~2LAGlEYmn#^5#8s%b%Q04f8ATlak-cav_Dx;l>F=MyUBA6e1?iz-*L6du2)Ml z|A`x?N&k28A2nbyuU3+bD=v%)kMZY|$buiP0F;k z?FqKmWy#1Qd<2ss<}%t?VM)A{o+4lJv*)>bN$}CzyHp38bhq1hRgCjt4^_l z*5DrQZ2YnDNwI78!A_feoN0wAlG$z4Fp@@bA6v?^qw}udblAgT{4I2=<(i=#XN)IfPXzZf!j}p@#KjtQ>9K~ zHl0s(D!^jRdNfd%GPj|@0w@7}TL z&6iM)DSr~&R&~IfrTM`LdrPk3vRl`ESYy17HNZljz-3x=`G!hA+~L`fQQjSnc~AV$ z<|e_I6x24M^*S9&(*%XggEEz0X1r>@<6Fw^O~bxkj#cFlMYJ# 
zS-cRLQ;6fmyc#6>Opb_?_2BW&Q4PTgo6LRx@W1D=6npsg64B!J3gh;e4Gn+s#Lj3g zGDq{k>p>^Uj?eew7BUFQu#+P-t$dDPU#S2!`l*%X-_Qz2X;SFui$vw!vFezj;gzfT zX{Ktge%uooFez+X@?nh6x9$+&&a!Rpw}!SNZ<+|lZu8$za0k)RBBT~V6$=j$SWDDX|&2O4Sr{W(Sa&{mKmy)l6 zI+~Q<-s0AhQpG=UMvB8NPm=x9MBcYTN`u^@n0>BK%st#U9@%{@-2ge*(B0YCP-VWI z5xayDSXza|bg20M6&v6>E~r{6Xk}Bg10IYI8(}qc3APuq@}7?baQzcK(ZA0vga$Q} zGH?4O$j;=;0w>qrOuJg2#|dnnC8&Xm z{<*J&&9GuE8?V~->v|1P`^V3zIQKu8lLhB4-^Wn_*QYwS0Zhs>hvZ!_>#4Xy=VF>so2{t56Kx&0VsThRt z{Qwyhm`VO!sn=MS1Wsd)lbV&*=_luaYY9<-D*zxG@XZ|79N|J$C}PlHMe2RIpiyC) zLzMk$t>V2Y!4*UnqD-yHZ^*>sMMt@JulHQhtoTboe4L-h=Ex0tC}TW$1wyxTY)FXhtF=-wLhctK)vl zwxh2MBBeVdMbDo!=_1HIC)e&(xdw_(0r=0o%fhrCTKoboYlSI%zk@>%cbIBGvu=#+ z%nOeWs3y#yxEpz*5mD&cetNO)opCL@LA1d#`K9rt4%`GUa*|3~ccIzpcNTn3DqWU}d?=6H*J5fC?( zWrDN!WgH$X_kzs*-&q||lH45e44)a4>yXp(83RvKY&RwT#g^=^&7UYom^dW|W-ZRma1QBz4C+VP|~MZ8w=l6C2G3ZsHXtXMH7NL%Jz!{hkS>le0MzG9H!SGQn4>Y z!EJX=k8@FP)Q~FHr`C0*@GRzSVlh*%*K#$bciKCrf_DZ;V@ZZd9_L za39o_(e|G_NqE)qYWn?WrO$lMm~l7V1Flsz4V*IYL|Ltx2*-@B)$uW4#*x=6;MLVY zwYpHct1sx`c{0Rz+n=1T7sxrf{=t@&L9n64ey_U(2%eWc@CEZBsty1l!_Tr~F#K;+ z6%N6Yi^>0{J-_2|w7sal=p-o$%A*M}dl$^Dxy}Z*ilu^VybUPXV4*778Ura;85p1a zN%9omW8L)mifN@Qlj{WObT5>1#@*?`RqHJ`Qn3RA#Q)q3>+z@V?OQgH8vwFvi74kl zG&5rGyugg(x!~(9`F#hEQfUCIa^uViZS_M6>e6o|l(yf;&2@OG=Y$bk$H1Z}MmHBy zlg$Bj5}Z)c5l?)K4gxB}GYrVR&QVbK^X*NA`n_5m{O76=Cd6_&*vjMDT~k9EO(*oO zecV?upY?sSKr9rpQgh-`v5=WnU^JUsv0@dx>5Y0L-4oN(Ui`O8n2I_DG-Z^SwZ5_3 z9owb;&ZBtOz3Iq8Q;DH}vbCw#rx2*~6!wWWE5ic;0^9w6a(+x~{>|^Awm;;#Q@=4# zsl!5@&{83)qw+jq+)1al{lr-`+9aPKSQWLCJMzzuqjRhEm#!J~3v4eC>OIz9&Kns( znk!}~j@H&j7mea3CUVrlI5One?L38rUZotS7*z>ol;{GOU><=}McGKO zI*A)UBRn&v0u?7mx~!-}EJ%84FdY)L{;6rg=-i?oMhv}#TOLq!rl50FOr!^Yh~U zN%oj7V?q;q-sl!!Bg(A-=baG)aIoOUJSVp6K8Q*n$^4;j@pK^z5j=l_Uo*SL!jvLCE&uWggH8N_FZm49>J-}VRIPqqB-C262L1?l|Kz34)(YjM5(>ZeA zFaPzKi-N}PtI=2J#7)6~0$5r@oPC;4aj)IUkJNU^$e!$7^ZQ*i3MXNKVMPOS*h+RM z1_@V{&q53@Ee5n{=yAU`dN*XD-x??6j`S~W$Xp~+3Jc|hD21~K%glBlJ5k_|x#hq1vzpPQ^u(gl9 zmhQ9mvb8>RWWpvIwRM1}Aak&FcvAOuX58Q`Yv6mu=W5-pt3o}qb;v{BdHaY|N1?Ax 
zyg*saQvBe)T=YXhdgXcWrs;M`joQh<^T&G^NQb_XFRFP6AX~~upg%GM`_dfC&HEz1 z#wXNmu93oEgY#o$F{a39$G0hj@(ry=w{?e1ONybexAZW0h>FJ%)aepftlzngICkp0 z^zvk#EO649+^O2@0I{VOyXrA+Cmd!GyHW;(M*XLiM9-B8_)acv9^h=?3f6dxUCj_{ zL-Ee{4!!&#-*?g+FnGU|o!{eVr_3OD!NFU-ougXRLuhFdg{^a2$-jOqt z%2u4Z{@{Qu=DbMxBCoPzY-IY^kVmGUoHp9HOMqHezH!xNB&~wP9=ZET#o{2{k!9GW z+*#bT!@41`jKff|>9eGS_NJ)5x!d#fb+V2eGT3 z=?3N^9g{EnEV{S8R5jWumK#`!boBCc+-ql?ytq(iV2qwuBU`3cq6o~s9M05nvlI|-n{Pja>};!E z;qfI@=t=Z(#~X^_w}U}-*_SLCh9Amn?<0@erusy$Xuz(U5{NLX=j)NbT8tYYurbp% zB+QR7e$VspcDr(Un(XJ0Opiv!n(I@h)em6nCXmSYum|U~{Al>-A!OP(TcBe6M)26~2@GNDEQ#LF?;}0%7 zPR8NL!S2D>s+{Fs{sU=@%0MA-JGNshecq*opAmoLFKhp?#JH~=oQFfU2Yt(RcyyEk zxO8XZ(l_E)2C*)PxD27Jdsq3GHa`+-HuPDB&5`z0>##Fz?jDI_f7VvS#rZyAV2VxX2%Y>NOR2bVTN_cmVc6G*`&Bf1qmGjJ;zIHv0G!o zN__H+PR}>4?nID8CpQYruY52Od`dP|c=lPLGpd^9g`=2OHJ?XE1ZrE!^s^SRNnb)q z`Yof~0hOqlfe7clS%u7_H+joEc&AN>ri;RjzRj#Aqpq+U4U>>jz8#Z%H*&%1+{br` zMENJIQ;})=Bc7qXF!|5~tbLHJY!%O(bA~v*G(sQJOf1ebbc(Op~}9 zw`xTFT-c42FvV(#N*8l0hWqVtc! zEm0*x#0jQLwmLPfwwtTX(%QfaGKSiDV9uSrZP0 zT*PgqOP8)!dN+!xKUT!KaCsi{GJ8>7dr8{?lP)#W6ES&Tai0R0du@`KZy5V8>{led z*cqRddr!mUDc+xWigZ|~{AKy_OteROvJln3R>%6+#%dE<3 zAX%Ja`fg{mTr98zip;lBLRM4F_pmR~I}i+({l(aV&~C}gD;2A@R~-RNBq;g|uDG#a z5!inM<2>#>CI~&v0;e$;;a!YwxoMP*C2vz<>`4PggB@Q2J)t4UO%{@p5n@txH*Oz* zZ&^0pR(A z@|J#+wDO|8n4CmdsCC{T7gqkJZNt z64U!)X(Gi){I+E0U>nh(J8`SOB;DDd;38vaBGa|m(%XnGkER@h5G>)>i@4@i+C0&o zCz7?2bBta79Y;3~xm$naYz!Lb{VI>K#sGkPfa^M%9lniXeV8M$yxzO;`91QWdMIpe zEyQ^ozM~Zl|CrW4aJ?HY_Qk6lpJD9;hae|j1|;|r#XtE9CISj?@?objXk8O^->?zK zlwo(v%BvncYwfx9zRU45FJ#XF06H&~ULH^ZTo7sOH`n4t;gH@u*tYZ5+mo^W32;z8 z-N-Ga8N-)6BKcKcd5a_hkby=nhrm%W8(_j)$mTI+m;DL5#U7i2o@|XeJhO*C-jgd9 z&>Y(IX&H3zZ}_*mcP5H>9G=aJ|C^NrU|po(?*}g`s1C@^>HjGLs4%`#f6r9DLBcPV zAA?Rzb}+#|P7nCb>0-OX$-NZovGBZvf^K3*@fP*iZ0i+h5jm(02v?LiY-{?oAzqDx z197R=%TDpHb72nDzjD=}hZrQ<_8+Psx!VS=Lzn}?rEYjEr>Qv9bcmd*DP4ATLn<<=eS|ZpVNaiT1XR2)a)5>N{Kqp0HyQ@WgT39J{}@h<)HXtMj5zCj z?uAAd15HfolgprC+5kXCv~<6hLjSceu$f0vwgGiB1gO|CL-6OD4V3|BRfU4(Mb!dl 
zb>-qJybSfECdVFS)v|O!1{O2`Pq^%7%l7xF=xVQkA-$Dj`yWgwUJr`i%8pcXEd=jaPwP@!=exVy^x;MaF?JEVGz zbzf#5bU4$JUOlpiW^0TlGHZXF6g9v};wgQ~*&$TD+9fDU) zH3n78pg&-W9C&UE=%EI5yJd$a=IuGCP-7>@efGhFjWz56C-ZvcN2s#EApjsFJrrHwJr!4<|wY~RX zM3@OcGe}^MGTsZ*HwHTaqDqQaQwYBC*Jt#Sg~3O#Vzt*VO?QGzrukhVOQiyUD@$n! zE`!2Xj-jaI?#~-zC^fYp_mn$sbYH|Qp?%i{9@xhW=M1bh=f#$QuRjQsXWFvP8u!n0 z`BD_cxUCC)dFBxH-kGph%A-=OS?%jMn4Ag}y2r4jKrdn|{~7T#`SZXd(}QgbLI82w z#_dAH+`OT_E!sCAnHjNp@s z6F#hxDq4faTVF}s!d-&OnxEO$<8J2%0h}2TXz;UM8Z$WHrkLiZebHLuq*13oKkC&7 z9)FpvI@QA#NvJ%Sax?;kzP|Tb*b8iznX$q^)H;xQhv4oYuJ$K7+9}ZK?Pp!CDG=Bm z8?-enN+o}3@{VxX9LT35&%W@jRz6&iJsrmMVp;Ro3#Bs0=HKpr$_3I&Zi!cbu~?+33ypl*lN!lE4z{2*gTT-lQOKuqwd5RthhDBPLNjgIAdqOPy2abs;5$0vn5$$VKONYURT%K}$fusZR zY|QbL&RC0e&0C45R>)-z3s7T9L;8gZu}`suLi^tuFt&?KIF1O>y6?z}Aa>?CuA{DA zC4`<`mB>dwNJ_rVbR?KVlj%&G4~@|Nr~eZ~h5_)K zAGyQahizz2Fs40R@WBex!}#V#m@j(g7j}b20bud>cUD)sQ0u2|v}W*2iTaaVcCR(7 zSSCJTk&ov10`Syh(Pkgod+_y^+Fyu6bE1-lpyZnfuVuoS?VtI9Pw;)4C-lqI8Ha4= zg))wu6OKM%im$p0LX;ZKP3&k4AYUYV0>+DG&{6a6!9RC^rb?sz^CY%|oIYJ{J;^s7 z3Mh8gP%Y%;Gcn6i$W~6$s+mjji#^qG1yhnI`Y+a4`xcXlv}yIBnEgV-e+QE%UgWkG z^zsK=i;;it2{Z#2JdkJH4gZ#=_s4rg26d7?fVR;5*DdB4b-Eg*$NcDHjFe`gY>Fwi|$w5($r^0qI;?#>sSbSP*}dm*xLZnz#aw0x#ZIQ&?{p^lXxutsCF6r!(Hi(yFqpl~;swC*<)PJ@0-3}R0a37Sfwm>x??b)&V zFk|$0ZWneB1IwK;*1{b)j9Hf`;_##~LnITS$(b8A1p}8u`|fT@VG@c#>h7)}k$(fm z%I2d(F7Q{Ll4x3p+;fbsFhe(_Un#Z4@%5$OqUfvM{fWc(^Z(=Pt)rq`+xCBAgh6r$ zsiB6nXkq9YS}9RbDG3pz8|m&w1sOmAMGQo`Te<|KYXIq#?%y?gKil_t*So*JwOp+E zN0^!WzOS>6<8y|yM;D)SXLy)SbQfp*^?k|WLD8NIsk|kpK>qO992H^q$mUzfBdh?y z@xEr4__O;LR_Sr{W?wWvT0OEc?1`1mHdj2{>5=G_K3-ziTP^(@wbejLxva*M^QiZg z=}aJT8SDnFwpK(WRw{vgK5K;)G*97U7SilDHonYMVh-}8 zKN(aMo`DT`c8@O9^m^FKr{1U8t;9;)OO1C~pGlcZcN$^dSwB3;JLmqs3*wC6SW6kr zgvf~Rq0iwall#OEFn~|pF3JPb;ZmSz+#A<6z52AFE@%@BWoCPb7di!x4loyIZl}H5 z+WYL~HztqIi$HB7_Wsff|I7Pf&MIzHr48-Zs%-}ahw|=!c`YGw;fcPe(5Ps6FkE-) z8`K#HR#x=Y%@l4`VtPRG1|YYkUP`6aNFnR-=&xu{clD~Ftx0hP6d^i=WTE~JCQSI*lG@pHQQM5EYXgkU`TGs%OGK=whNZc zj1k#@%(y!f-*9-K(`-mHs-V#kCvDfG(8B$qSMnyAwPI)*E9 
zL4GF(V27QPsfU*A9nOqU#rq?UygEc>%VXQ)P70SU&AH5eGp60X04b@%fvFsxYx&ts zX+v-0W|JR#@v+Ynu)zj*xKMP{fnv{nOX%?MjAkB9;bJ;-NRbiLIie>QOV+TiBX%!% z++XUzHn~apqs5H_4elvb_MQur-Imw&Lj$NpQ81JK2*Ed*5nqRst>e?~nqt7$Bi+B- zR}|4k31<=Xd^uac5AxObJ_IL=5q@?hHlzL0U>hg*eweXArgmq9@Z8Y~6G7lb*z}HQ zFFyPkvMdix9@t9{ZlfLETnRTKCgR?P(fNYB{y~ zY&8&$gM%`){pKgvKdK?8+a|f`ZcY}40Fjz8Cza<3t}%2*vU`&}5P_}luq{E&t)D+C zU7w9VNwc*9rF`kL{0goHshh|7>y@kdB7oA6oP@~R58uJV=jBq21>176Hd7w{JqmB5 z$E{rjhL~9+N5w;MvPcBrIkIPh`SP|+saWTw+S&<_2LtA)(^&DT`4`a zcT<*%)^1GXmA!}y z98*eWH69T;oC2|JiXhNCIXu()f%vwxh6HBau_L_5aI3tc?XKA33{Hrd!r_As5wvlg zZC^WAdDpb8WYO|7_>R2zxWid^-8qUn_-wwh) zl_HT&hFnio5}2qB0|PDetJU`&JTB9u&12+_6s?YpVwkVVmXQGEL=@9c(D{1?k_CqhnEph0~Ne@f=6zUHSoK8j76}&r_zS-QPDA<#Bp@u7otr2wL#&Jz$7WBYE)G7#ifGLgIuEA7FFLLeBUYXm6a9 zZ%X__$?4BcZ?o5O$o6ai?8NQ4rI_Te@uRzbe-7^diSlx9fD+wK19^3M2`oR^z*`05 zmy07g6*h(|IuOL(`@d2NMyrlxJi3b0Y=)87K8_mraj4yS9E zK&yGNt@h{JlUN#xn0<-2h_$02Pvx(dkJdoLuWM03dm4P+-{JM3hiVOuL!ilK?i~mI zv~J%+Oqis2|AlLb*=q05&%m$G)M>w#2mx$nraQM5(;uPV-**c5sRF?b?!dND+jL%2 zOKdk&8Gf`2M2~?=0`&jhQe}mavurHz^N4+~UhNj0Mkn)H{v_{Dq@ru^J_WiHL-Rv0 zTEy%|W6$G~h&egHsW6=PuqwtT>hEH(m5xE`1XiRVGUZ|Ben-bNtD zjJ(!wndL{DRPcnwCYm0BKY;D=KEbADQb6m*`IB^A5Ar(@oI2IWI}2F}Fr_@)h(3d{ zFF@=Knku(WfV@mld=T3W0sX)G2k-nqrD2Kv>Y^7J&cJR}7j0oZQ(JxqQykZ`z2Pd{ z#J_5m$RJrz?yrMQHv%gWw}2&Rh!;G$?%a*s%dIM(W4}IHZft^KR!3)#K7E;_k!YE` zz4}Lv@V8j|@1GDTtCJB>>|x_pvjf}uDR|?vD6562pwRHqcj~k!fTi{sG+KBQ#ep1j zzMc2d5%2^|7<7TITlCnl@HWqwxB%AzuyT!?^+X8y!~ea z;aOKsK^~7mGekC?AFuK)(trlRqssovf3@kMR3=-}fUFdaz@*)J_5^^ggmfPNN^=!= zDo*dK%|APxyC$*tG5zch^YOAdl50iE1{uFrv6!*J0L3olwE0@I@03dAp}8=_LDxfLnc?}qbIajmy^Q;< z`brR#<0ZHkn=@Y5xsy1i@?Bqwc(DHK97;L?6P7T8C0$1GFI`%Fh0?!c|-(n5OmLleD@goUa zr>wuEv!))_j&cHR`CRWO%bh1*i}!~&(Dsvh|8hK{-ZT^OyQ|;FF4N%Tx>d@z*M2JE z{0SAvo&`E_-L*ivA=I^0zabw)g9XN~*SH4JWyue{DzZ#6H1Ru8Cd z`TO4{U9RC-q=bD^eW&3^G}H(KKM#UofiT_-Xs0VvwQ%S+zkR!i|U59cbA^Gobt<26= zN2=E~QQjloGZy^pTY&Xt;D33i329c~1KJCwMV)(*NCB6nz6QmUqyYJUG8V}y@k7)4 zq-bFlkB%4jfvCL_HmQ=1zP)CAar|m;V{$xV3j}QxSDI3O6yewd%k(`_$FS$pB=L<( 
zbzt@RFUX>|NdIMQ5l_d#@83B{N3y><-1|af3m+=T$j@p%BfG*z5YG8{&@9W3q@Z-n z`jhD|5qY{!KathA-&SKEQX&_P(LP zpby#_>)@O##Fo(xBs|y5Sdy7$ycOwVi6K>r-6h!vpC6WVh+U0Wed*jfQ6kzu3UC`~ zyyFTw(dPz^-)S`b7bUS3Jyt-OX{kquPn6XJgC7d=eYp>eoT3zL@<^&|UkzZ2j|o3) zBA1a&x^q#wks@FgO)U=18C-fCAK2FG)W5>oCv-@}Fy-rU18iDO3K?@(wU5?A1Z95+ zxK~*Vi|CkH1YIh(sW>|3lpO6q;-K(E;r_cA5`R%P4F(dr$SO?(W#Hv-qZ<*$camg- zQEp$e7!~T3oYSGkv}p~7ZuM4#T$=ifJ@d3T<<}!*haiP8jAzSM(D+4s=krC*0?fPC zm~V4ElS5DLoOO#&>AJ|AtIcTrk5KC$87PR=JTN>|I@MK;dTM8D)dFm9<6TQG8B$)t zxg8=2YmYUF>SGpx+{r3|jltTQLbo)o+z)=3m76Lmd9qeCY26hu;V~D(`ypV-JQy1p zrcD&$8J*)$FN>tE4#4)VDnuHB5fra&rRW$(!3t=H8S$A4g*{*U)+)X&E@|BB+ZL#% zMXQ0U-6ART;K`_HUKdf~!*YkWk7%T^`l6N-Zmk4Qjaa_V_!N#)fKTWiT{htqm5!Zk zI^Qf==@naqmqjNQDq0`|fZuG`!34T`X)UO$1(K6+b^uDIag(wo)~;vo3bB^2KFK97 z9TGxPI7_A5`ZbvJRc^&Il^dX?8zFToPvU-=o#CsGO!n-xr)t^Se1v>_I@{)^;gP5Z zbD!(?M;WG^KqtRwXNJ9me9DimQ82^VBlqZ`RD;jVgYW}AhnYlM7BwdcI4MO4eYpTn zq#4Qct-j_@FtWjr$9-YLj~(e2A@(hS(as&R)NF8jm5tn4;f)6b`Gia^@l_f@85!Sy zlB+=q?DhF^UPZ|2KxaNHT)QP1bRM^uq#z~vXKD$=OziQyyNcXIWm(sT-$N0;@ z7t+fE$)z;^bwU4=g#`$z;%HMPylQ(2S>s5dEmW0=3zCJK@xEC5*wsl1W3ezQP=xHk z)_tX{<5`@$1oAh;1Nr5povXV1GJqo8?iR`FsF~TAw z!YZ21Ks;vOM*6p#)DTyWP*Vf>Qc9si(Q2(c`^2uQTQ;bkN< z`g*OZ#76?7S9RzXv32Bj{x4NpKq9xl|Ls2F09oz5LG`RK=qR)!yg7$4&aA6JkXzlY z3urDyosvKswSs!1i%?lnNr)TUFBgBC+UBwFHC5RnmJahO+-BixM8^}})Q5AsNi$p! zqrgHirCbawq&SB8*TA|pFB*SIMGPw4CMWnTpr;asd2sL7X_?5K}SY-$JGg=FP^%S2uOfkc=p|n1AGhlV$%6c&GU_rL;+>eufTCP zOmrp}bTgGOC9Q=m$n)!)lpeUKO%`0lfuC(A&iQptg*jZJPbS53apa#~S?|E7AgJ8R zx0y>c`AvzrVa0M~M}o=*oD70gW)5TUrlRTNZ?rF$HCox6~5_&|le_gC~9qkh>?&E+rWRBcLJ)4Y#qFQC#sL ziBr<=O%a&0?{+x#P-|?<)xG?+xt@wFhu(CHM!2U;^HH;G{L_b$hjgrW>zK#~-UuMs z6)CSKsRRMLz$uIvc<)k9*iXzUqr;!{7YUWYMc0q8 z>9tNtl1L3;#U_M(1L6b*3skBoSx;I1gQAvhwkfQ!=eh?r1*3?8g2(;5Pl3GRsjUu% z%2sn%ByWl6DdLXgj$hXo!QqZ-ma|+V$yi8Nr;d)$M_w^Jk)6seTQImzu=5-Il576? 
zD^)L?d{#(ld=1}>j9wu9jbJ9P%*J`DLf}3Abm`VcIA?>rRJhj)dpeUUyht&Q^Z zU06w)HJ6X4UzS(pTsw20;Vtu~DnX>2*-YmfVYTWOc1lYU zf}yPiZw%84Vc0H_z3xlr8ze4by^5pCJus^bZ!SWDW%GoYa-IrVCe9vwrJFbNFWDm_ zW@H<0g)1C>|kHya>Il1z3(ptcLohzcXjP*Fm0op7!nz1j=xt*OLM zq?0|LEU2_U>5xS1RELjtsUIgFN{?UN>3m8ADLB&IuN{)uX$=1zKG^l3Q8@po%~_|4 zYCVP>*Jm8$s^}t|g@GlZuVbJWMEY8(IfSaUN78-b%=t#wg-{%^Jzk^l>9(?{p6VNl zQQI1an3>olg1yC5b*_)ojB?uM%qHjP^*M%}w~$1w*DpMMT}bdgd1xx>Wh(l6x-Vp> zj_+G&!60C54-?-lNH!vkd+1!_kD>+f;iASp;q4?OTg)glxtFtGIomoh;+Cs# zV~Yv-p39B*z`~q$Q5q#3l5XwccP?iVtjnAV5#+`h&di_69Ulf2=m8Q2Fhqr<6NlQpv&yTIrtCD*cWnQ4~Tf`X1d8|D(cEh>9tO9B@ViQbj@s9=%F^G z5mFvnZdnmK`?AQSptmL^4hZ@zql>b8KG;$z-TZdFa`Qd0{68b@f8;#aZX&A!o_U&i zM%+xs2eA&4wCjmKVM*<7?cM^;^^Vc??0CUY3iVg1$!~JN?7gEqO;Gbe&2v{#O;wbK zgGREL*+NY*ak1f>syTY)ZhDM!s22 z<9%Rwp`nn!G0SJ1Ll*>*q@h=Vv2Bv}%0Wy>5=O|44$~Sa@>XHF^b?Bo%x=(nj}x|V zGgJUO#hg@FThxfUOZ5f%g9FOz_>Ld_DXRB z>29K0=WJv@vgmMPC>n4szelrdhP26+095V$Tn(nyAlw^)C8CHCSeB!{ta)o^;!Xoj z%A8z%D?5!ZQr#h=roPQTdM_w1K8vh?=cKKks#hN^UhYOW<4ERsxmxi#_M?P^2&m=)LB^f%gQF*+=vzIpk~Oh0Cm!Dn z&t$W`S8I>2|3?P-M_LMAaBBG1(=BAD?Ck7(=A6s}jGM1~7OASnDvy|Zf?wNtEpWnY z@6rdzIif7;q?RW&kr)WAL9--FgEDGdNHqLP!^Ewiaij#+qcJK;&n4KpJ62q6gHm}R`YDsKNAE2R+Ku2Ka)oQh(>5!BZZq@6>?*3d3JEd2KqobSgs zz&qh8zS9CJyi)(tMQfR)l^D(Jefc0T|GG5ainLB^IxgifH~tf3lG~JVkuOCII{_Iz zNup=ZOpU6pbShrC-KlH4TXAa%9q%QI#lzxQs0W^h;SR|~uirV91Z76CUHDdQslN1& z>g_eY^{oNxaaxv}rO&*;Xd%}*xAu<-?N5~D2*PRP-w8MW%|W4jABhK@HTT?;^OJts zTGlbaGHTT1x|+JlkrKH_Y&N=4$A5BT!`88sHSM~*WP?rtR}hZ#EK-oaPD6`f=ZoXw zLevf&kA!-Kd7)@Lz5bu(TAg_KX?DI)Ns;QO#GcM;nPe|2N3h*=^T<7yR=H~q!Ou{g zCdcWw22cpl$-AxE2QuHoTUKDM8&QQ-H*i_TV+;6Z^0b;kbuP~RW?mpNlTKbT&*QQnoViWjzZ}?N z3cvY^7DeMNT|bc5-v7bExSllBCgB{Pio*@rWIF+KrSe7M)tMg=RD|@Thagb|f3}Cn z#^J-(Vje5Ou>nK}ww16YIpCr6T_EG9q}k4P*nan7Oj(mgo<;WbBlBUIL1Yicn(1Fg zf)oKbxC?huojI3+cQ*?gsaSufdp=sI0w(DtymCNepU10A!6)m9p!K!)vwxd&kQrrm z?T#&F`F9`Xp{YQ@>X+~5hQ5W%iU2-}rNYg}wIA*<+~Jp|Q2RBzfOqhOp1Duc~RclZ8Wh+D(FY8qNhE7AQ4TZy+KV9Qgc& z%z44yk<7swS>LVjm2vhp=S5&0&`juZj-T9MtRP<_`@GxApjC}1lW84orRs_1Wv1BF 
zV%b3#xwZ{JrT9ds8_%X9ghQMgq|`;Kq|W|v`&XCr2ghK#xFvPcL)}oGn7afmCJe2i zx}n+li~dkw5==@1f;m`D!A42Hr2&_BP_rn? zEhMf8w$Cs@`G$iZH+YHP$hd8t`DZ0`o)45aM_wC42k>q3ge#AcbnR)qVWF~zpkNdG zfuvI4;dn?7gKL-bkdUjb`i(4*2V01U<$ek@zoh*$H`Tn z+el|?)6&wabkF)75&c~(uox+gkntcfS9`;J44QUx!5(N*9t(>7+2%QCPzP?HP0(ij zKiCg3bpCV13ETX0$2@H#89NaQmrj0(b4Q||MLb+Zl2R~f-Y1Stq8--bJyv;nsh^&$ zV21>AJoE4}<1qgsxiabb;us^K^Q;2ZV&rRC5rkwfDj(w=9bEUNNpEOn{^9;MBk|gT z^qokC+_678{XbJuiU1u=|9XanUnxzy0b=jY-?;iCv;rZ0;yj$~lGWr|HySpc4kO~9 zVlyS5=dO6(0btWqBIS==1PlROZsgX9Ig-PaT5&YCk(#vqs_f;6gDu-<7*q1kJ;#d>nmcqu=LiB}Uh>p^crpZ+$D$ zipThyVd(ONGFPcLMt22h%yfd2Nd29g1=#14vY2zJP~L?~4wI~XCzZcd#f?|ge-p7b zCTZgnzJ8OfRhhxwxbyi}>T7$q))1<-J}RJ8-|k!sWL#Z*bnx@vWC*~7+;vruPqvTL zx!~`+7}Za8@gG-4ZL$Fq)N9b3ZRJ(*?9oO*@Zkx_?WzVhQt4cWREagk zlduBtH}xb0}=^DhS}&klrVRP~CgPXA6K_z3eaac*IL*g=&z?tcD9@cnLS zr5t$oY{5j3_Y$(?`_Aacd!L>Fc~c2!EOKTy$rf;yh+l06^V63r%Wb$J9 zDwfz2Hgi@kfbPZ?AdK&W+T&Fsi!y5mB6#4K`#Uw(nTMUT=W?Dra0$43gWs4SRnULaAl2ouOXI zT(4@N2C-cm`DRxyU^7yH-njlMhDuf4Fo8WzQZRcG4uEA?@R;7S8ZersxBc`&k(CKI zYtLsW5I~grUPvH;WZxFRF+hXw5LMq9Ce4MHES{hb*j`|HObOPU#aks0$0{l&-^j2i z9&Wt4Q;fA{+1G4kPfD=*F)hxbxxf5jv;6Hk`s-%}D^LQ7mWx5)$Elh;tMtmw4f?i` zvhnp;Q88Fk>@F|);D2n%A$qM~j6O%eJVQ0X!p>F6*fy*fS+K1sKV0 zx?P*QJvLlv@d_vJx8quU!#jftr~Lb8e&;(Z5@0&C4o2YWkQDDFGlXm&RRrgZ;vqm^ zd-=3P1d5LXY=~Z{IdG|5`trKGceBbgk;wsd%WLkaV#vp2r+H74=?|>rzfQ~?D&*?t zA`@=t%5FGP>f$PY?4$jky{8%E3+K)Q3(Bck*MMC^qxuFrGVG4asSp0FH=gI6H$eX-^0*Q5s=E+O$~}E*;xH7i0 zDf0>}RD^e!)F8Mh4}$IFyAdZOzzCg$WvX>{y6j5jpSA3tTi#!E z_X>f^q2*_Lx}b&H6w@yyMq6|2?klV=CcAbMOx^Q0dI4(07WggHmBy_t{yG%{5;^PR zo^(XS+hze**f;T{Uc_l#epWYun_3Df@u zHU3>(Q40hBwb#Qs-?QkkthmTn7%paS$S~Ab&xf-?T$m0Zi5D?@q&yW+>*YHH#4T zK0xbUov0|1ka2hsA$tvP@R(WBqaYm^A;G@1i)zTvB1=s#ObD$FQ2Mp(sJ_go2maZ` z!Z$N2KY>wIznlVdRLdIUQ~O{u?Y({xe(AS{xZct!1uvx{-Er_s1$)fVRYrLhFcmIvptei$DGq%CLb*XKLM24ZD`IQ~m$eP4;rw}Np7k@cY z>*aR7hRPHi+#Y}Xn3Hkj#LH`!^bRHUu7s1S`k9_!VBd@$_(+!nRzxriC-@*zRWfl7v9D=+nDT5n;T;G+Jtr(ms<5x>n zF9fn{An$IX=mYk~wD$LmpF3YcO}BwQg(MWv(t9euWKG4-d4arb9Yp!w6iw{|&Vl`K 
zYp%*8Aii+>(eTK&4KFh(q=nLW% zFS`Oj)I0Y}$8uw3qg9rgQhT~y%kN$}6=wGLF9G(LJvs@*D1NsEoN{pYmwD0?O)r*O z4S8gKS(}dQDwMWgfAmlzJ67ReN{`48oCf_ap;{o>`wMi2#mWG68}Cb>XTYR-pGX*3 z@xf5i0tAPu^_nt4iI+cDWm}ozzjvDm zG;rUwacO3()qb$}0dYw21gf}oK=dia%25pm!utU!R3%kb$=WfWMu#!sXW(yYpydT3 zf()Pi2d_M{fgez8lyotvp8@P&cX+@$`~E?%swL&r5)n7rBpLT@P8(o3QGO~ry3r`( zwkSOA+S{=|+a$F9?eWQXaicH)U>X0r^@iAz!Rtz*j5q;!7{Ga4L_5baIX1%@K|GIF zzyqz&5`b15y(2m~FQ|oO{QSlFBFHC0K*o!$%&;c0FZ$maMJ8PanptikBz-v3wYZcz zEFlus__Rt|b5USjg&06^e#Tp^zW}$xYBs+z*2W@|WgKC`m6gHAVXU6fkK++eL;WD` z0K04g%k0FSi+2A0!jk~`lFkH1ar@v}SO(^zh82O@{}78sjAfE|^79q=`;!)~_3g6& zedUnFm5Te4*Kj4+8nIgOWG2pk_{$?vz1X+q>ww7Tra?JaG6iHKW}AQUrob+RQ2={G z&ykFmTL0`YRKSYxCU0q9iDsAv(z(|$EHYCTIc@K>?U^mZ0H9{E_{j)R->l_FU=^Z@ zhn-nO_2H|TDu=gGhk%hxe_&WUO6BhNjP$jg6~32`y(Y;kXr}$5=Gp(1N$`Nxf0xie zIcvw8vi5v1BzG|V?=u&>GP#gq?$Bu=)pEg3+4=!Q4HukA?`Ak!#2~?3SppGnHo#j! z-%+z+LfPtQ0=sAjRX2e$3@w-fwx{JR>!U1ooy@vdz??VM zvV#@W_bwDTS9SycsbQugGi!~^jaj-_5~ve&!$vt+*wibzfgbRBL?p40LU8>I(S|h3 z;=8T@Q{X6rLzkQ*l`Ip}o)^&I3F$>0-VUi2#si|9C;ghTjbI z#BgF5C+~ZW^w+Ntr%4AIWrnY99`~L-d%t&Ka-rY)>BXNk>Azo`EBoJm4xYdAn#0*B zH?RO*LVGJGNds7=DFWb>taE|^cd=@jPOclEnpU|34p90E@efzKn_!PAe;^+PR3aT< z>$io44_KE{61h#}l@$Q|;8*sUbEN{x3tO;>L z61b+Kmgt#eO@3j$z5s1JpCt!#iTKDR$U+Dlrz0J&_%;#mSOB=##yz!YU1eEQS3F?~ zh~s2GpRqP#bp3arq3lh}UTc_O9keQNLx|DhV)w9Qlie|REHl(YT+t8g$LWU}uboj- zE}>-`Kq|F)A9Gl4SH9se!$YP3Ul>!3T(7%0?->pBqO%On5`o`$2xJq29#94HHF}It zbhWEAoEhq3{q9xA`$*aubJi0HklST^<*VKbtQwvJO`~4LjVkY=^U2(CjhNB`OD}Ir zO+V_*(G>gE59j~41`C(XhhmjssaTPyW$~MYRE?$d=8jtLp{j3Xwu*;tgh37fmNPaKp@@G z8zoNl>rT4MiZKgtIl4cT#LVGceR~d8 z2e)o8=j|T}P6NUBTCBu|1J)r}=LgI|TU9AbJzJ?WBYB7%%7X7w8OE7mxBvb^(*gv7J@@X8OzSQv zC9qBogO`B?f0-~-!7hXYJoe+LS$Oqszxoi+s$s(sFl^=Vygq%z7(RV*JY08P>J2oI zuERWwjF#ab(NNm(1JQS*QUPrbC}K)`D?t3ZDX3o%r=6|zYyR+ejW)^yojP;(3hFR- zGxM>;+Qaq?&lw4?1vAjA^F8}`@xS87UzOf>^%(#IcHDUgajH39`Ghs4REqH~7!pYm zog4QF;(4be-QgUULWryR)zCLYQhcDOy*&Pbi#jB!>C z)iD~a)huDky5-!8(fj!8^Q&%4n;>Rcf9>Qq85$;w3_HWuE(nd!kM}zp{m5CQCwPH! 
zp!EwQB!V%36eJscw@p20sUlV0{*=ZdbVuul|uaHnkeptRhU9Y?@khw3ne4ZKxdoa#nWm zl*_s|NMm9Zbdx2#t&+x;FVWMJ%*eXjlz$4A=&Z#nkU7GGdMLpn7ROm(#PboP@gP|k zF((d1fvUtN>3fv^fL^`~#D3*b5)E7WH_fqGxCxp)zXqsXhAo0sEW-~_dQC)@m*7a= z@|nQBv_6^Gq-Q)&NhSd0*zMIF{}0YOn7cyPZQxYzU=A6w@Pqq(zQp1z+L_Kwv!AsA zMSYQ9OJ6xy*JpkzkZPE`hYHnL6itigaBWV!Ni2xPeagY#6I2B0Nw*CoFu_y?HC{UX z&M|Ew95ad@!5;a`NF|O_u!VjyZo|9Ck_G*E&n?fKrB-CxZnLbll4W>X&2@g<@nhZp zy?YSx>Ufa1{AG&!dWDRFGGY4b#+S}OevPFp1W?gEirXQO0qvPa0U9v~19uc6Rs->0y*LMKy!+r(91U zy8q8mNzl!e!Xiq);Z;mr?Hz_dpf3I4dGonu{_W$|SA?^ILGNt_JWrlaL0h27sYpCl z|3r&7ZywH|1}I6RW)8uTgT&CrmFO9Hevin86K37O&o`0s=*8p4X?E5{bo|9&7eM9lo}v{p z0hDEY8!vh&;vZwwq5S3ZW54Akb+o8S!F69Z~ROI}x6zgx@g*tjy;m z0+hHyKc6qZ6dGz$FVDQB_aJ{5EBD0os~%x7KK z(5wIINK3{&wLK*2DLJFPktBKP=6CZtGF-#1WiMiJ;({C3Mxq3ZAonaxyM_f>;8U7| z(x=j?d%4zt<>&?-=vu9}){ z?bYOI-e$~;7JdeP%6WV6HVp?tMIbZ-S2 z-I^l_(1tl{pJP$(=p}W?F6S$QS%?Fg%^_aagmjuQyeflc_WC&Mhko8HaWWE3$?afQ zz--+k{;F(U)x`LgwzfFH(RZVIr>D(MXeOJDqI`{pH-spOzbMd)}+1-FeHhgB( z0al}rMHUM}_$d%8nP#Mxp)==MgSg|fJOPJH!YJQhE7bGmW9&T#Pnu%dS7vfec9!Rf z{{EQCCesGToM{FQJ7TdwD{m;SL}97}-Gyb4Cdy*C(oxSQ%*?rG&TB!LJ7;(Q$q;}U zM|l}JlDBlD8){m27`P+6ix$1Qiy2%y#%EeB$D6)x+NQmjMoyE-{Y#1PclVLU43bi1 zCVeaAR&g|2A0|@v1{lkjJ-%jmJ%Ksx!y^b1;gR$N8VVhOzDy0@lAe@icJ$yy1U`8N zOb&Tb-kh8ypDCbobP02)@+%yTNm=PUuC<@T!{Y>v)0o+={NQ_v3P(64RmyZ=F+H!u z8QD_Jh@zH|*W$-AT$TqPV%VrNQCVSAL%&rGk8Gx5>)?1g}(E$qg^|()I}`(DaST;dbF8214hR zLEI+_<(!l(FeVK1R|B{d^6PSc${dHBCz$=f)536_qr4YLxhhK z5R&VtvduXA4tR1nof?okG(V$NXy&#RKRfub?|6{5?70AUBA<~`ZT10+Dgap`Tw?FM{^GUF|;=Q~`>lExOf#`!!7K{>MJgV3^{^+UnnqlCcO}mj^b>jw6GNUx*iVjU;re+I4Ing&fEU;9V*9voEr*u_dAKZMxUe!qY} z#?oZ|fBqd|YdGJ`y3byk#UfdZKgv;Xu67Dz3MwHQOoYB-v7D?14O_;tBcg+?G_m6Z zI8zI;^DbK|lsI-fPQ-hFxK^#;U9MA(g{Qhn@sTOLRuS2aC z`ZYg-+|-dETy3g<_0%2ZJ?63crOe>Di*rcNmjl# zZ*<~ibNEbk*xfIL2ZXPPsJM{0Gy)Th2@7z>k`-0ao})eOwY;%YU~)$f;I%eO1@kal zBr9yq-U>v*dD#4qino#KeSGK}Dpnfl4uWKdf|o6IGGYH{0dlI9^*OR}EeOjs#Y6{< zW~-epD379wAZLom)(3%5PZG<^)}LrOn4Q1&iNYbsUZkIvXvDf|S}g1xvwK}C`dRb+ 
z{RW@Cesw;J!sY3q?$S8v=NX^zCac`t*N;!aq#KgQ)Xj&>LGBOF4nzkznEP9KNC|Sy z@W{Fmq4@+O-tbYQCC5^p;pWmoTr&XH<|J&~W zxL^Da<>bGs^*^tFr-4r0skC3n${9A%L&-oMATp9nQ3*+=j1AX#m}v)Pf;Fv$srI@` z6)lYPb8E1(y65|AsfAWqy%9H*%DEqkzi{-1=YC~lF3E_y9~b##v+r#XRSc{z;v=?L zH=NIp>uU+XWrJ(i0LBh4r+DXw)aaC`r+03;lq!ZR8APkK#z2(@rLy!5*gw(*qzo=0 z@6LBHh}x|Mi#EFkFX$@O*rD9ecq%oqDbYCMz_8)|#bJHG_|PMaK&OK6SpngAEie}v zFu6tVs1%E7&{T8pNi7tKZGp)zSVv$u4N3TkjEdLDyiJVSCdeK=yS4cWma2I_6nKU! zc2x5S_QMop#dq>{VDYdI17OABZmkfFs|gqM7u_nF7g;`x4yPgq~Cv{Nb@ zg7+dPKkN-zHkf>gFNPsoL?)b(zvk0wPMdAUKQ}6goc_vO%g;?4yFYGjti(ATYxw_+ zXV|C~(2r%(SV>o1f-~ar&QjvhC45yC;FBf{X-E1zDt?-P(f2O^PN^|uTC>q2P&m24 z2IzithKFe@)!180frj6i+j`0HdmBFh-*9W8w=8$r&Fdec%EOWitCl7l+Hp|sI_*hL zna?*-&FpIk6?O(wY@3p5#HYp?kpo+R5rIRpuVq9Nj6Bfco6iDIV($*5t2~EHA{W@h zfr5S=QSNL7eFHrMYwfQV=;E{n4B)|*&57USi;hAl8Xm8ah7m)K0ESfjhx7}n^TWA~ z>MpROI_Mt7Q`iL9YH#Wu<1j^mX~M2B;C_DBiy1@Fk`v86JDJ!nfJ)#Fxgh z-f0;S7r*;{_1(?yHPk$ckyR4Z?%XPU6nFu$tQ<>amWOsz77r>!&66hQQ_uTfNVqR_ z`E6W2fq(nnF8QLm#<8Re4{UMQjm1H{?>txM!ig}TwWP@Vl>e?=ltmq7`*;wbPubsp zs-0b8wc7grSiXZZe1`AGnQ7Tz04+BjEL!o+t5vOi!?nY{C+0>szf^q@`)-BGzt?_^ zEtVYfv(b7ESlwUj1XoLgzD%}Fp^Ntqn`#G4AmyyZG^a)7TE+jz*IR%^y?x)qim0H7 z42XnC3}w(tr_wDTf~0^5N_Ur{l$3Ng(%oGGillUd2uKbM625!%*8Bb4`@fHm4_=Y^ z%$#%f*?aA^*Ye)qtfPI5^I|Y3GnNKSwsZ5&&M-GsbGdnM`PIlZ>j^gbMP>U_KgIy5 zfHW<6fSU*O<5LSF_Ao}S*%{qFsnV%3b)Q&X)197ck_ZMr!|GM4Roeft1^)F%a{-T4 ztw}=`7pg9tm6tgmLwMZocl|OpoDp&d7jz-(1*%l?A`v?&nwo@!1Zf6Q>{nBhf$vq~>ly^BbjANy-%n4{AFuw(qomr)2qjy}-` z6;oY$|G+pZR)B*3mmM;SQV1cyccbcn80Lhu`~uN3ieNrnlT!XX!tE4gazv4030} zIG;ZuR_bF_SfqFHVRwk;5ci4*`ke*K!eTsKcfa`AZMFql;=(kH@?`#)4xR{S)(!xf zPvWaHv}a5D4}h`33Jp}ek&fx3-~N>6uq8-d!~2|tMxsO%TUuT*ye|oRW@cv1W6jJj zRR`s$yC~X#pL?e!ycR@%Y$9+=LPg$=0 z@^y4~LhJ+VP^jN=Re4HNN`3M9Yzb{J3O=`WRh#qj6!k49xqH11=B7_S$3N(PH{$ib z*5SXOGMUgL`{c61{z7KtU^2N{_${*B!LASR8CNiajpZ#79Un3g#D#aE zSHFT4p3JnyVaCJ}3@Io}L%*a+dx1vt>pfIe8>owtgPNh9s;8jSZjO}R&$*8T<5f1& zd(JDrZAYg`(S{j5KsuTQ5}8CeLljfWoC|zgNLD~ibt$8?i}{`{3saoyTiatxq*1)c 
z|MQ5Teso^5cjdisoTbt*%@Y>)Kj%a&m=yy9BZyI*qa~1nqAid@9G;C@Umt|l8v&2j zSB3o~A({OXjLr!~KmK<0oYulb%PhbYEg`tAOT44;VfrfSDQGc^30}C#+4Z+=M$3+H zO|>t-+&iE*9N^_GT996mn(w{(f6U8&M=27F@8<*VZOxu#DU&pbDD-)QUL<#ZaI1E3 zsS%!rc2@I1wn{J<&3Y4)ldOW-Rtt!azJm25iPj=J4_Y&?6B8**&vE?z`FqG7P&Z@H zta=!BWn+80U;ha3D6qGlQ1F~8{6xgb#NXI?`<~$>`)y0Y{_B-MVnnb%y6oXA4X#-_?-9gu<>bn^qnD3GhghXDwh$jH!G%) zqrhnh5odm|RlI%UAzf4+dojV^2DksRfbK@HmDWcO2{H%4k@_s8n61E2fg8vls~FuR z{D8JSIR85t6PA2|^2`W$uVynH<0Juo1%CYegS zcZRk}yV_P(#Z6_&+VIeYY|@5;Ilf-(Th()g6>4~?62THUQ2#h^|MPW|6x@rdA4Nm9 ze6QSm3Ep*If%tL{1ih%A4m5jRZUg0e96ywLnQ4BXn=mQ~io7Sb`YQ zSm?mj;(twLr~%|#+JL(^q39{7i$A?*R%xcTa4j@Dn3VSR`<~Kgwtx8?qtl50^UK@> z!jYzv8*-uw5|=02i@g;#%U}&!M@F>^8v4Zemwp}{SP5@QUOEIHU`n}IuAm<%}5XPEUt)(fdnj^o1Me;%NJe631vzIyLe zAB`MlAXlN!ujSW>KP74(F_qlTSpYMKU|`P*mFU~u)iK@)N}p#DRVXpheb5cinQITe zgr-FQ7 zw8>ZFczjO1y4;qw*zh9yNUG8bA>xVNFk!%2io7LeeE{t+P{kD=;?`O12eCVuNS4L| zta{6DQb#~VkqEtk0G9BT!S4>_zYeqz4PvoBlK`l99&ia|c9Zd=nU`5~HN3v4_m}xw z(%oEwXdKAqZU zw$^+GL#^D@A8t3$tZmJF4Wd(MVEeejoXLY{vZ>rW;=YCzu8J zTVmxi$R=r;BhWIpN%(r}dN(bA5_ zRP0H+4ZgOkoJhI{TR9vSb}PC^HW7G2SU5N$N%vo~ST8=9SRh#)hPiV>Y!WguAyJXaDe|7KMSk7p|8j;x6> zQGFWiw9s`u^o9b)>C>m-^6-NHls`L4=l9{SD}oqvD6hGT59 zYKQY-t(~ne%Vc{JACT{93T|4m$xnTpo=^%e~8X72o-0&Z$ie1n$oFyIsZC~JF_)nYtJ>euUK9y3 zp#HVZ%$d;`r3&&0@+<3WTG@{n?VX&OJ36F)J<`&F(C2GTgbu#;NJ-y{j4#Mn*^y$r z|FX|ut2NMK-;J!p8&RbBu%B+D(I-GEiZ0hk(NA=>Ndz6_+vE9pVUzpwIW(2;N*`xFrKZLWQRs&2<&F5vs3nTD%x z6m{)`R1M3PEpMM6xoqFHIC=lQqW$xVs$fDV4%M7YW>Nibxf>=kMml<=|2x z^ zKAxCV$ukBvZ?gGJQ{>&}?~<%WqJD)K%1vy@rPR|>IBzv2@Y;11e0?;Rf3?DjAB*6e zme9!3&5b2O)N?0zlLgEr9{(1V{tQ(Dp{GRmFVhltcANF3Vy{is`hgoFBy^-R_fvpx zGhAhpt%Gm{O-0obLxfc54G(lx=*LLr`TIF$vAnUEag`ez8>{GZK}OuylQWuMbPrQ* zWzlJiq!pLTE9=siS%-F`yM~?C+j1?PN&g;%em@Rn6Ba}qmVO1RWLoL)Cm;lFzmK8{ zAmheo^2^b#7XTwnCW44T(|~YOmrtYhFIIr{Ul?odiJH()RyM|ByIo92Fx|l37nnTcxw?{ihBE~nOlst z5fMY@$24J9er2M1r~vCHD5E|Bk`&%Ou@#-aq6xA|K8EAcy?ghnE=BA)ONzU}?J}`Y zA=P%?m5O-s;K~b5`O816yEjDb{$JtE2HXDd{Pg?MG1lddH{L|N;mf2_9T0bt+2Qc? 
zIvzHgZ9sEg6m$`La~|I*F&bKXbq@F&9Iz&W&5)rbY8UFBMPr69`ip-D`3j=b7Kx<% zjwMN!G@A$1zmS#lk;zl%%LP3g8Sl)ltv)GPiJGY8csuy-=U0dZZ`kI5XGtzD&?c^4 zsa+e1+D`@|BdKA@T3XhuSLLunbmw+==!7LU-?#$`)$PHSUGdLe1fTjXTDr1CK;xae_O#Md3O4e3_AH++Lg}hh|GU2E zlLL7PDHbON(5jjM(NX5v2j<}GfI~24cmjQc8;^@PmT_P*^M4yo;|D67I58m2(4pl zlc#`iZSh9j!mrcs2w&_?@lxMR=|vx2yf|!6QO|tPrj`<44?rFjC#oz??Bmtl9e7!9 zzsXv6BA{YD>6#b!@3GXHV6obnVE($m*?NUz>xAZCu^%rOU02Yo8nO%llbcd^|8~gv zXonE*eX#UNk_blbgT?piJFILhK}dn2vN#6=io(D!Pd;tJ6pVhIj?3wWFe$yb6p%XxY7pi0cmp`#o&1rS-!a$x4#eE_CpRS62Bz! z{r6P}pZUsO-D}d2lh)1rn3QLAWW6M3hvYD5rjyah!eVR1tH&oNC&cV8u7KFAH%;Z3i4utaIeha30u80r5L>y14ucx5n zcE-exq>DCU$zSCDCwI{GYz4||Pq~@4FN(?SY_AZy@UTx10$_mE<%hNK`p-3nri&hB zVJE^7Ne<@rZ7$Z=!y_Wj{`+zVxyxZRbF4HG(O#r7$eQEne!}^ykbxok8R`I=&}b+s zjIvs*v6e~Lp1SjeKin=;``(up%_w4HfCS{AUJ=%av+|w zK1}9pcY>gb%bY{yz*-0ED)*M&sKrNnJYwSgF7D9-#W$1?Jg$dWFqnnVNWO$o`fRT*s!JlE6kEv~Lv>JLUO9so#c5CAm;3l?cVWMGTTp7092PwNI?xw2I zsLSf3)+(^q;62XS=>|yCP|#^8vgS=!f?)nKTWBBdY$~#IJnF-!%gxg|Cp?z@f^H{A zpKx;%#z(8yzkkonyO^7htKRvDJN~8;-8j@HVfh6sX6xKh%V1fVw4l12?Qh`vxw-ia z6%|#|Ruo0Ct+o@BsP7ditTK*tJ_9o{huZdKiDi11(vwjaqrlRImS+yv$9SiXB1;b; zeN)aCHL{8sGh4TZ^t<2YnW>fP`f^o;Y}`2<;zZ<(zc*_0z;b6vxjBKv(ZtfWeph)f z=}XY~aQOzgV?Z+2y744g!oP<>2rKXl`4Un}??kdo^ZP5s-33NNOlW<+NKUhO{BND= znTlzlcyzs6zzi1y8kDgj0-=>m>YtwS69%dZK|uEi2CNOi!d)q2eWF^$M;MGzi<-)2grC6RurfwT9z&ro;=k$2*E#%6f^3Mnfm0Qh%00qw(#vyHS>Zmi-CiT6T{*B`ub1w9zYU*QkyHp zvJNjVaz%VM@v)1ZHk2PPzCz9TT1M8qg#KJd^y^x71!V%k54N&0<#eh>-I9X2M=vWS zay2iHTCwHTvRzVht36JwchEKh-UzGqtAnF}A05L*5%Y5Ach@?2I^#%;xrGclK3YTx z=V=-(j5x{h%EvTM*37ptEELZ>>#TVvU6+q!NphSRjqaZ0LN=X0d$x9gPxrfwZfB)P zJi+->4!NVtX0D${T|PdrgpA<90-=!$nZH7Xxel42>v-O)w~W9)U!djv2ve9gwe)`I zOE)}$YFEzI4pAq(bG-lT0n>`svX?;p{LlxVxZi8`U%zX_0BkojNl{)ThwL%vjD8KW zm$y$p-OoBq2c}<4KN~X?siZ3NkUC*Kw(p zL>m)rZS;2)l|3a89-=n_DFj^&)m&w32oMka&Xr#|q=9_ymimRqNKY`!e)pyt3Zxt7^`+so>ko2aOBj>bE>oK1;>s|^0B#3A7z|}d#V(qx zF*Ug<842>>e)f;ccIn;Q!*Gn~bp3rp{nzhIGEgU8Gz;L3-i1C=#^?&@Gzx=PU{9r$ 
z0WFPmOS!kR;<^gvrzByoQ4KAqP%PHQxgr1d<^<9BW*D$p6&rQKQt`oxXzu~~s%tyT zjBTF&4?6@y=?%Rtz0DfBIir{LX~OQ6)b0+RImAY{msDG1M^yL`J!}CE4@({TOLMn6 zsdE{-BifMU(xJ7bdZQY%lsc1bqZ;8|1oK}XPY$UXcE<$Lcl&e>CVwn-onP?L^W{RS z2I#C7mf@=Be5Nk@wzE6kc&7UML1!C6+z*5S>&m;!l54;HhzQ+uOl~PPk|e`jo2Vw@ z@k;yZQ<{Mm9KYp7)?qX_s<*Y}M|k@Upyy+ZdQTW%s&_oqkuMn{Q_tB!yeFL@58tAl zs1xaqF?d&CH#J%a;&|X)%Bpd)FX}^|!-q)=t2Sb3`csb+AGtQwJbj&eI6uN# zayTo4B*B|2Jg|^9xbDMj@Y60-y#YbIR`1po71hWpxFA69p<6WT;Wo`731?%wr6Zy({o*Gil!+BIdj>x)=Tcy=Hh=M zgnw4>6)dn6Of}pJjU_O9sJFW^a!a;A#L$!v&|`y-fIBrc7&M0QB%sVU<=Y^g)*bgg3>6^Sh(a*h=i$ z*xMWO{{kOpygzQzYB_g-X`{&zS6adU3sorSKBSu`=kv+zTt=p6KD_Y@N~*X3&zx68 zpfHT#NXV1kp)ZTa*+=1Tp7_cmI?w_bu3GfgC zfWvX{N-oU;I)H|`XB8ED%_eI|USA-z{KNs?kV4oZ$R>KJP>F$Z^o6Z7Kbwe-;nQ81 zX!ODPV3P`hPgd6+9_khx@PkBNxF>=-i90 z>wpaZhE`35lqLlp-gH5}!+FF^n!q7o3Sp?PZ|S}-?5$4)LKx1%l9w#){!NlV33veX zNBt;A6%D!-J^lC+FbVD!n4=fGDgGYvVcwN&n;`3LR=w61*p%-N$yAoOru|Y3d)co+ zvmpRs6&v#5L)f6S`XsdfM%gv$T_>2Lk)pno&-?6@mZB2SnV^^9b$?dSi|}SFi~bD8 z<=3ihxk!au(IY$)tl%@Yj^a(MWt}@rc{`Kw(J3^Sjts^jUPOq4#t)TVdv5L^NTgKj zwL3t2QnWX@?7BIc&l=?QG`h&ZE1tEfIRHg09Qg>X8mu^?{@5>Np2_xMuG&DaVn6$C z65om@g=5qwqdWjMDN2}UblSYMmNy0HhP-lz{Aza7`Xg)Iygm`@de!bd70-7Ow{1Vt z_4?*iRHG8vZM5zpv+k5slRxJ4)7yLR>EjR$z1?VP-fd^G?*}=d#&%`VEc_0eshYe{ zJ?QT7Ysv`#y#$;)*}DtI_ZiE8nrJVzeEJz8iw!SgvmaEj!MxePkYn>%4_>SfkMB=Fc=0BA-CI11PE zZZd%NbdkQr@*v~RLcr=_cw0}6)m4Ug*Xx2mHOH?<2*n5nxqDLth}a{Xfn#B0MQ6qA zrXC0TLq|y@#HvXT2XT)K`(nW0tRW79sAZ-v+?;Xtjd%F&3SJxKcLQk=0*hvHGye@} z1g`k1l(i^r@somJmTa){4j>621!SD2v6|$wZ)wL(*!lI^>$ohhS`hIH zSTkB`Vtu*%ZKfS}gjwfB_v4mcb~>lqLz%flu{okE-YZ5E%36nID=b82J2inH<42sB zt+!ooIjt|=`P56STO6-wFNN(FV3^LO1b>u3Hs z7_(whW;{YRI2Us~>+pSs%Ez6#1j&65;riZTT+AflH?^v1&Hb2OXw`Qh{7T zadVhElOI;X(tZ;?mrr&lk}8wQs6b) z1`m(g-_XcY_m5PPc`e2nAyL!%fM-mOnUF zWEevfeE0DK?eSPoO1d?_(3qu6tg`!}x<)PrKvYi*gi69CcK9|1wnSC`HBQrvE3IPD z_T+vBn}C*w39Y?N_d$W1@x$j**VBBg-drLJK^K$b9t|+n52c|SrNwLuqzbQ?Le2w} zb5ki@^F1`O+)E0rr2u zJ8n^Q@y?^^&z^1y7NdI-TM7s-X;t;oVjm;2{t>Q59c^29Is1eL;Lr8eI@wvTjXzf5 zwXlGtqsU{N@;I(- 
z%UU2^iSzM{>YLskKFf8-6cHV-=FKIE&!Zn`e5vYiIk=+s8>KIfq5qL)G*4^{vPM`0 z&YCUu%2YiC;LObcmaYsYaZrdL#er@5lneg^GCkAVO_x{g@h^=&ZK)|#*=XxitjS6y zIeqfvS|>+r$6p_h_dCq}I3_96+eFYCoj!*tbu2)jRkydXxwE|IxzCyYawGleP6d1N z<#&!PcCQ9{M1Y@`ApuzF!*OL!Z6yzkvq6BCq1!J#QZTRK0}LHB>nJ`x{!Q1w*kj}j z@Q-p|bW4XC*q2A#YaAZk=H=saBK&yanfQ|8S=3y30)C)U*r&iHX*cY+O`!w}lvdfq zH*nlMR!bHtf5vt92{MmiGt6di$pWdZ{Q6o|pQ(jT1OXV6nj3OD~ z9JQEv3I9yP3ylkmTvXfqiMhGZosUVF5{!br6EG|R34VztFxZ1s2g&MKc^hDyEWiXd zPxv~=Y-M4(#5T7)jwxDiM8I=WO2`X6Ru;Xbo&^mBPkEIz%*h(Z*I7StnnyoPQ7Q~> zR}^n$NPDBiIQ@_{3c4%u80(<-v_^(*L_#zAyVxCo&Sjw41+>npZVL#CPluHD=eBSn zkH-jQR7DY5d#&QZ0Y{A-zQ!~We|4zwa5H}Q4y?qVu-fG%g>=hP$aPIth zYQU!C9;*QEM7iT72u9J?OZ_*U7g$`BRJD@Xb))&VNmt@MBGVn`cB9D=Cw)`P78ZlI z_-b7)lRtk)4f+-Bhc?YUIth~%eglNO8EqCJH1|K@J1A6iM@T%nA#RwUC;V)7c7=V( zVuY1{I#;6g$HP@C6M2tuql^q%-UR4Fn+>IMzIm`1q7Ua znl^@gP~TYWyHtbY{AL*YE4Ad>=ZwN575(+>pas>~!-@VaLW3jsx22!zUhPKZ3*sKL zoVDAF7pb`te}g^wPC(R3Sq?VyGM|oRsJ;E#ty!9Fp>!TXndP6K92b*bY!5{?1 zWsy81VXv*N4GE^^9$#L+#lyWX83TO$C!{VUA%me7v|kLRG12$!=^*a2e_sGxQLts{ z&&;FHS^5(%p@!loa}=y38+vUor$Q3#9BngIFVu5n-TQ^d;5|oU)1;1a#(qL|&Ign) zeH9ROaJ;g@3-p(wP&AB4XP_N#jG5IZDW;(a?r52&8O4a3m_I|bZgsj;7S;yue93xo^gjLJ-U1ynyK=k3t0(nd6~%|dx-l>2 zoQrG}GoGy2O~3so8y9xrw1b5vWeE$KVqK|)MCMK z;t9OJD=*kO@yZ}*A^RNH{;K_ZZ`3|KYrJzoXGo)udN#y8B z{dv@g#g&nmA)zSkYOl43`H~Jr>S`%$ViAHzrzjH5C%*j)Bv8#;FmQS&>u z`RbNlIeB#<^*=iMreSUM(Hu6`3NM&DG>Xlw4y*1+YL})u9?gEdb60?EPjh2xhOilz z2CoqFDx;bwSPSCk%Z#vYmjNGG59qYLQl1nO%Q!FY%6fqonCH#qDI2`L4*%ov2w2y> z30^g59Q{tn{$#wM%UMcG>>Tc|ue%MHMYBY(E!ICOrCn~XFDEK9^8InvN<7aZ7Fv3g zYUtEDw|t77i23^>Eun>wkUQ0xYAE9%&6tLU(wnY3ecYfD1>OG^$OM9JDc_=g&$Dv- z;UxOL*(?180fzG#xR@KB$4K{w*KsAAQ~3rEZ@{ zCx18Jv2+)qrCy&yoB7;TTo!ZRWx~x(-qX$e2OG!0@~c?w$XE)oS{qj^t_A-~HjcLr zmyMk}q>w}T`J=gHSMc@avg3I+g02*`RAeTjm#svG7{gXXN$5IPgFO)&W?wsadH1J z(~lT#6T`@p5{eCj?twe-Z7IVuaUN9G@(WH8xe9fAuxR|&C=H4VS<{ct@$Sd)A_p+z&Y#C6VIxn^A^a-Jf_lpS$0<6uU1?Fap>V{SaE)AF%& zO$iBAGHGw*HS_R_*z;cACG@4#Y3=8z9D$9IcHDSEu(8pEfRxF6@!+;p>Aj;XlJ`aJ 
zB`;HSZ&x(6xF)&KIJ(Mv1xryzJUkgvS!=3`*Lz?oLO=In$ETU!`CvQhrvAxArO_X} zpYApBNqfY?@m0JpqEvT2dRcI!(F}R6VIO;epzNX zN~^J)>1ykbNtMM8He7_eheY<@mUbk~Zm&;WQ_04LuEtHjg-e9nwX!z{;=%$>cKjl^ z1WmXhdCKWqk|v<{z<EepOVGA~`(aKD>6bT-GuE#g00{-WKHWmW`s1=)-1Zz$;5DvA<%x^+T~f6N?p-I^`}`%fY82F&(#rW#1 zl?I9hd7ooz#l&I>@Nbq&HJVKt`=C^=8aE}a(|d^(8!f2%qKvYw*#^wrUMULNxtbk+ zyf`m3p6Vk^J>z%zMIhhmHMI!=M*sTM^zQLQ{Mksw!Sj(t=VQsMd(yGA=l9~gTzT*j&Lv0K=Jo`$R}4;7lCFcHG_gVKP()F#`~F?u&LOCnsJD_O5G=O_vuG>K@&a zA$LX;+dVR$6e0T$yfc~0WBW|ow1qjAYmO$#)WGyQgMxE)B_F=I0~!c-^-E6hfLCk+ zjY$-YrU?7NEcO|=rBYv%@n+M?MqhzBkw$3Gtbhq0Zj??IYuzKC*$*=JP2Wbcnsiw? z&lzh(7TzClme-;gaylM#8GoGiDqlbAPCScJ^j?4w_G0i@W?eS*X%15^bYS;uhI=i?yai&+=L}5N=5`b&n;UkZO;n zIV@YuwLQNw6StW)n;0yb2zjv-S%l#}G~5^2r~$PkaI0~*WLMPvqDz4jdsP39xXQio zzotZlZg{xiCfpyKlMLdukxJz_Tm5T9KL{YNxyzM*)zNUFLLH4R4PfvT2H@83 z7Wv@QHUX8c2WsEcU4bQ|q{sCRr7-$3^LQf%AXYwG-RS~uvSKD^Ge zzG~uCc$htdl8C|he(S1e!A<2Vkpd&cmnHi{gg2oe6Wib|Kg@L3ZVExs7fM;XIq9TJ zit5h#QDx~1N=R`(Qv2hhvv~}Y7rKo}D8H|KdBpIhFQw|f;(ISl%-Olc`mjq;%PO7* zU2of;NY&@DLy_~&Qd6-YhBNTt#|MexRudZX(MyfzHEK+=^b#;xbx(N=xmc|CJXXG4 zJK6R8MeHG+$MIzE#if&GD;4Eb$FlT+AR4=8v_TzJX(l;i7cX~fhz`nUBV9kGzKEqv zI7}x+#U~+auvYryE7GzR@B34I){Q}q_-&gJzSs-C_r>D*8c&!GWo>4{=PkITl+Cm% z4kLH2~%VE1dh=nP3JK&GfMW>hSB6! 
z#V{q^r5?Y?(iA@3*1NSy|2h5f8{n)!(*#XxT@=3T7x-+LewBq8_6 z)iYp6%y!Fa`l`oVp7zNcboe}67<@xoZSE-fs%7bqxwz-@KMPMq5Scbvg|ILro6QJl z-w1wegbYZCDj`0huR(2oyc3qpzdq!DU-avkI_~V8*-uJT^kqIpdhuX0_1UgGJ(O0R z@THBBO1Hv1F;4GOCX*sp@>nW=@@^R$vEnHbXvy<98m-_Pbh|t9rZ2VLx;bZ-GvP<% zaWDPtX;zt+4*03J!YD*#dspy%m{!qHg;`k}Q9 zwe?x7;QTM|Z}cBtWS@HcC0^p8OTkZSn$8eqZr+tOmiyS7A1{0p`j9!Lvm<_e@=kSs zdsq40+h8Dl7dj&3(~_XC&jJHfYcI_9xi2ap-vxT?m!( z<*wIk4Ykon(36YW+l`BlLjUN`g#G`1F^@*x907R~^}DK!@CU*G$5Zv}N4W)ncJKfA zLCVuDu|S+&vw+-w+8sa9{(R-v%yqAC19Y4=J`ia)06zwqqmuPda_C1hKK2yMD*_UI z4)}F_>|WV6KeiU6+toi4xCPew`eaK{$BQ$l+zHJ z;*=^8i~$m&LV#u2FdD(gQzVbgEf~m2%dWSE25M*)_Ie)jD5p0fDBGR>s?28Vus-cf zFiY85>HpG~h2{u|*`8uSlf)PJNg{QuC4VMt=m%}k2R^l3RBj>_smJUGKjGvXFioD| zcEY{8HgOTr5!VZ_nHQ)G&p@i&9(Ft8PWp5tdL{5^VX{OYCalm<7hsVjFqt(_nunfx z$NCBPpS^>__vq#Up3Z|~wxT&HPuef}Gv#j5NF7~hYnF+3mAnU|PGUZLmZE2!OP`-} zar|9{2e}iVnYu^~T-m=@-xSSp0FAMS?6ts@3mRPpxr4hUEpWkt&i0wC_?kPT$)bKe z;CGF_?;SShELyMphKG|eJwS^@2L>&rRE_&H5U}mv8)>35fp7+&h6W*x8heEyvb@3X z2^K2BRN3dlyQ@5cw|txLJSRghH_#)YC#C>K)}T0fMmg)D#h>)ZKi0Wn1hye>3`-kO zP5;c&_RwTM{yj!1bw(H-x$B#un+iD>4#@2&?99dWf>lv#MH z#m_DQAm#(z&m@>elJXw<0!j-0z+)nn6K9T68h`95A97P*u&esDPO&d~)Wh*Xq5Kte zeRRPX8S#&ELI)X%1Ia%(s_L)J_!>52h8RNWtuG2*-~;pt;vNy%17aS@VM0pNbO#u5 zOl|(>&{S;qY&_M{7la@fhXZ3h!pI|F4gf@FKVEGozo$rZz1CSvqr&_i%%AjIG>Vv| zN)QlTyL#{hwcR8Uek=ZVl(>2;r2Qjbcj1zq5&NJKDQ z*Q$5Toev&^-P!Qvi&B_J$YI|caId=dU}nG`4gjwkn!WY2EP{0|a{R?!v3^IoYyCj$ zP{7CBqj^X0>g+jT3$z72iY6`q5uFw!6EMzR$iPolbL$kV+(DrlfP5&t${|Rn7jat! 
zSMgD|v&$&Y^$GZgN@2>S18mPeFS7Iqy{uh1d$qkv-=q&s0N*LQ|N6poIV0l0X9+fg?dA0n{H6WoBG zTOU6L`+ElZ9Z5!od~ME5TYA8xYKFO=$>On5HHJMpc#BIKCT^ueLEoHSA1uo|xpc7a zLIb2{DE-^k{QBkj^|PZ+zHe_X#MFC5*5?}@&sA1fe4T~G&Z}h}CRS_&<4}32FYhG}-EQ?Y7^^ z4tIENVX1{^4uOUIwYG7&O0!O_t%c6RHXj0UY){9Q2Syl~->ldI?i%S>DTZ8m0r^Zj;yVg8uHSaQ4$KjQ>f2*y;=tJMf#?45^nZfa!&Ep66d8afI2L|<+ z)}k*q7d?`#2s2WS^v%ffr*iq`u;uVM@MCPWVRq*ZAn-j~jB(N>$K@+}ro{*Qmf9j4 zFdr0|pw~gSHI$@LJjy-4+u`DH{As!6T^QeBq$jVEfljoKI=m}abo97K+)wT#@WP|cIQrlgn5 z&ru)FJ_a&MFVQp+O=)AAC!Tj{0Nv4J9iKmikhgsZnmWcuZVX0!hXci|ad*L6jaB=!&ldoA|^9 z{6LSHPfkv*W6$qmimXPi#OVuuWO}l?XKB@|HD<-%{jDobJ?vdrI(GK_p1(i@i_ZPh z^0MTu(!yMB+F?hzB1SYwjGgY7+;M#eK_3M7if4d9=X2qhj-$cxkIkDE-W`?`sytcF z?Rp=7Q0AYJejj<+!FcTo^2Qi~x-{RrrkNFm$Wi!vKhhkVB zkl4aJ?nCl$pi1)1b^`kT{-QR_Mft*m^@ak}(a9Z{qWK$pvGC{e{PKM3= zyrL_fe>r@t(0bk`WY0=wWn|i`uKt^N^jmV%N&vuPVHLlAw*T`pCZ%Vd#A{FWbjkZZss<35nr_ zyD$0|Ygw}%<@$pDIU0S)3e*o_O*IZFfq@RS>%weOA072^zUw{r`fMif=9>k{7=k(F zXd<_srEj;mk&xc(bZkv|1mM?tLlD)8pRGhdWW zQmfS;m-e(9rpSIMz63JX_e@4wd!EbSClY8vz1c1|MtXM;6B*93^9W<9*!sQ~>c2{(6z^O-|z|v1Ja7`7Pb8ETx<4*eW_VLscHm*K*F{ zTo4^HhTRK;v8waxWm)5JR?kXya$ule)btavRMX2E(dkGMvI^+g?XtwN^1{GwFKB&8 z?5*Tsj(7fok)B~a#dA4Q9l|gAf%=!f1^Q!-2fH6#tAV>AL+L#(^ne!vq!T;Qnb`Cd z3}l^jcu;=*2`LC%yn0&qg!(i!nP`%P5jgQ#biw^6 zH#8~={*lh1hfep`>wiAXVuBhL%&EMx*we}@tFh`|tjX3mZjg%G9;7UzYTARswC8u$ zYOD7^oI-%tQ0sD+THO7P9}Z=G(w}@y1|&;lNWL!?a)i^0c5DYb_DPOrWg0|1>rorM z*Re2HU6d+V@`$KM;fKW5*TJPH(LzSM1~4NpXml-mAQz*`X+T@pi%h zNGVw`HXF*8P*dK|J@UabmaVu-8C_s1%Mf7OV9lV~^7MdUW>jtdRA-V<@}g)<`Bhow z`z79Uea76hhPbE7$S&Mnz(Ta?%XQGnVIy~?L)^>^<#6YZMIwhpY>g7U=H*CdaVTxQ zls$($!Zjrt(y&3c<{}W~?{!AA7Xo!ZqAaWxoev3#;N!L3vyfYvtmPGPg65)vh+KDN z@+pXF3Qq`9M)7Hmdoz{ZKV;OQmQ|#a>`&`Au@eg!W!@AHvG-KlDza^j{e!Il>xf0C zK)?AlrdV_zW_E5WbUcP&I$!^6Qr45t$)Dhmd^#qj)wzCcTF%NB~aC+>Me6}=CIxG;? 
z`X=Fw3{90e8&Hg_`x9QhCkV7QE|@@YvbnL-pMQ5<30MHMv8wR~oj#3y*!P_n$|P(V zJ4oY4EsWT&F@A8gzzy#vaQ8trXMCON_QaL-UwZFT;_}(dT~WT zhRyz?Rk#~leD1>B&W*B98*taF@@of3ep2(n3@#Z{w0rl9bU7a8?tQQsCu7xzr!&d7 z>|UMcXB*hj$5^7%e}5$7CCT zg`q<@1BOlnVfZbNgHfBpsxSjag@})Cvk^f}ll?kRrkE{{p93>ms=~4i&5TEngmkH| z4G(tKzkct*Y%%}f6wG?jvn?cx=kl(lWRI-Clo8OlrorC20$tX*m|(Yr_cp0%Bc?@8 z-4$e-jlqfxM|6Z^l=5BGQE9O;dRdHO6Ku1T;WoYL8k3L`XSB+kp0j2gx+%?1AXl&z{0P9y#?QfE7~J zA8(Lik9k&91E}?%v{whh*2D?IZsy*MEgMSN#~6wgU!-FiC|I-?{dyYP7+Y zD2oN)FvFO8_ASHDLAV6}5vqU@d217W2>=$fftnE}gZglMH8g|yfJ&v0?b3CgP^5DO z!Mr>CC!@Swq?zsYX`Wis)dXK$tS@_)rGi(V7HZMT4nZ4^nG^|9^I zUchRuH4q%bY&o3{1lg=dMvz+frb-g*INmi2AUB%TQRSAl&cU+j+rIE&{G=V%?ge;DF-Ga#N-?jPYba!DT;6rFGac7r|zuQO{$v+mfkQgY#Vji+F)4kP@Cf| ztTnXeZtJx`IDT`gE5)+eS|}+jSzw@mISWBzb>C4nw=J3T^HBs0fqp4Kt(r%cRrZ+7 zIecuP>_yBcmxfn1s9{!>O4u+aYnp;)l@IW{vNNo=1Q``47OkKhd^GfKCGF_rf#Z72 zUf7LZ-0M6U-FvVKGvHcvVOjlb_g^go^V8@P!es*YE+OMk*r0%j3aL=5PAx`-zz}?(>HOu>r-ZPcQsv(kB%sJ z1Zn!v^WH(*ZGVuq2eBnCaUXb9dw)k_e!9HvI11GwB|Caf2(3C19EHTH&>O31Ils4# z`L!=fsAMVb`p6coZJU12Ga?$wE|$$`{G=S%#!QA(vPh4`dqN^^9YvQ0k^Q-%>3&~5 zxQe~0y$%wtW}0que`KNh{*3n3+dtm_0NLHS^BKUfxh+odhj0(68)|3}qV_%#`}ZNr$b(V^0?(O}Sxbc{w&2^A0qDBU654P$hR zw4ju9hp-``f^?_SpfH;6_IbYd`+e^}0NcImzRvSJ&Lf!j+?@ZTzKh}m12H|sXEa1y zXSI9_f8UxI0V>xpTEU)|DyJX6?k|lT!(RM(8#CH!tug}SVCvOSKqWY;-yYkhqOT)a z1LOg@Df5X9(ZAQ_o2L2xLi?}DD%K)pg+Cj^|D@9GrM0ZkFh>0H`FQ}8N+kp8|G&m% zsTYEAs$O})-HFwBBl+L28{7z9Yt+C(aKMIV<@9hBwPW+YTKp#;oJb!13($B~DNS?Nc>*{krd2&l>E<%vdOo`*pFpu~((G zvfNxzR&PrRurnAOs#5Ikd9GZ3UibDpzH8yrrvw10aKmk{u{N`e8juawWo*n?hgw!* zj50$7p#FaKAG-ZPS$@Pj^K<)u>;Zt;5tHF!G5h&mC9p}a|KvyIIndz0&OFyf9mbt# z_HM%F_rIQvtEFrh9n%3|dvxNCOtLqZZnX7iYoHc4u?Ii$JuwGxA54ZI`_^RDD2~o+ zu8+sOF6v)h!2o((R;#)S+M0GA8iq`3bayPnuCK(zlvIZ8Pgfz1Sx_97;~F-3yiX#lNv7WLvk z@#lM$#VegkUrNBQfCPkoSpX2PBABJu-ln(}KeGpj+1OO@TtZOFXf(5Q1NCz5Ve5xk z{DSU%O4Q<)e?dY3SXjOvKrP>$GS`N9PZA7>aF>9+&z5wl1UulFB-wI5cbk+O_<4Gv z9Fb5tkbdAVpw@Z=R>%bkUr0>1Xkyg-yv55!^Eox|9w&Zq>Ntx?I 
zlUntI%U_aVb4E9;7X2~dguXU>RPhh&!KUFdk+>hb2pb?P*jj_8LE(Luv1|OJW*=H( zh9@zg3g|4g*!W&>mz|fM)9h^y3~F!#rXY)3uDcSb6+JUj`KtB|Kt&k;If*HsPA8WF zc(&Z>o^0^!bBfH&+pB@N;XRX?%IK3!Io?FJ#d;qnb%0<>K?uD4v6brq-`ZLy=f4z> zCAut%gO8tEDPL267oY7V3SPqkFQWG$TMR9K0)VU1U&Avc{(&T2EP&b@7*H$2xX#3l zvc>iTWs_n5xzE{y|9O|;8Udd!I-&ebdeUVJ8(L1-WFU~}l%u~frjo8z=T7|#@FoEm z^sF<_HK;$Db+V|$D4P4mM!eqi-KCG*VRZPc1hkt9*nENpqF|i`>H^ymW0nwh~ zhJ~-Dd0>?sto}oGo@tas9(1wyXf1+&;>k{#tzkOT!tdPGiot;JCUxq9krL>ZkGgao zu*>6nT0hz5!Ivd#*E`4~RXs1wTh40M5uV&R+&h0&3Ii3}oR9*PLM6^4`Zun7)9SG< zc7GH&$u-t|UW_pyb1dYp_Xm_EJ?&5LpYA=17B7jK6|o|D-}fy6k;0jqwKq{hFg12) z<6$;3eCVUiv?YhH;B}eQ@&ca1ILjEkAUid8@Og2v@OtbXu)4aL-4SJ}@iDK#)n$xfrkma4=-cw%j#NZ);Z4L&RS{~`F-D93(m}H?idpkm(BeY4 zlgP;U!-FHgpLk!00@IxwF+NPpA}^=cG9R%I#&3seA78)>ze?@D zG>d39%7NVof%}^DE=}g+LTNVNDu!FO&6iDNG9jM@imOE zvKs`!CX=mzHZE)Ecfu~_(?yf(L&`=ik7c_o+a6lJ9={Iua8KUl&vp%GZ;h za?kZBpin`PeC5)wIH!r2|KS3pgVaf=7qpo`0QtG(htL(EPU72T11>1wN9r%w=PI8W zFAm&`3Cmp6&F(11;@x3hJUy7xqdGcFoi)O_*gS1C*)r6md1Va#F$I-DP2WED1-A8b z3!ZeG5$+<XK7Bje?KH5ug?8Zk z>3h^8nOc3ham-ls&;V~b`>A1Xo6i~X_6Lv0zrP9pe3)o-xUzKK(vHZ0i*7mQ#?jSS zWc5u+Yk=>%h$X$LoGfwEzYXO22Fb6#(<>XL<(aCx^~~^;L2$KSp`F=4Nn1wF|CY|B z3?thM&5urR7QTAPhiw78%+%zd2LKKH9#yfIpCkk-Tp|YDTc>pHW~?%6(|-YUxGT#4 zNSl3Pg4BnyQyiohI}$NF!*=h6s^(Mw5!pln#fo?h^itU*fHWj;Ykh4U0Hn>a;{@pQ}{ zN3+ms@&lTmQY>aI(Z}^I748T0A5s;&&g=$DK=WAz&Yg{-2BK68dAr>>kd_V?+}pZk z{zp^3>mJXyV_}Z~fK#J1_M_A1nBgCfb@c0tNyGgBr@(xg50$xvqJp@tZ?8j|u3Ho>CqDXLzDC1*(xOuz4HAmKv0LF+23yu_P${TYT;*c1fivt zsO$i6j#=HzUnGR7|2a8%MxV4M?si7TW7&@jsU#RHYyN&AbG`Bb>>nBsAI^7-)8bYhd1-YsIzW z=6pxm?at;BeMfz8t&6y6-%j8CEuS!Y5 zDPg+w1A;2UD}WCrZBg76`T$strU1(G2$h6uK#q=A(Knj4PhO9%><<2z%JZoK@^1rFM&U@#cky5h= z?Jj&N43qk1qmzf`|0)ocK0VjPjcDCiUDS4(k_L`9jn@C&dHfhp?y;Yu31z ztiEV|i1{P5J3p%w#lLl#@Pz(AOH2ERS(x@~&V-I}2z)e`yIGe|8fn*@_mS)sM9eJ3 z``fsCkF>ekXNVhc~3$b^k{q=heEg z1>#gcHyeS~(bmfT$?C`Zb&-zKOKb0*ClP@{{E5)pBe#@wU$z1((s{h)*&Qyo_c!}!Q8)2K2h>Ctw0ei^lt=$V#XFth( ziJQ4i!Bws@m=P+HR>s1v4j_+|15)Sc7_JyurvTsCf1~%&xEzQMq z4;9aZz 
z`j95dJEzw+CW!4F#9Ji@{S*LDk^t%j=8KNAi$=Ax++;C0&Ye6d5f6W_t=G-}11CS| z6iC^d0Wve-G>-swfogG(SK9gUYQa`U&~Hk-u8tdA=MA=QG|!AJ2?-g=rZx3eF!ewF z3Eoi=e+p)!4F({56TpNBi8uu^IC@@C0RNm7cw2CxX${)c;IdtvB37Npa;krsM>YW0 z%Jb=ms}no@ElsZJILZ}_K!(d#Nfkw|H5bQa9%HW} z*S``cN*t{}EgEcnIb`F;%;q?`vbfe^CL0gWhcDONx%WEC$8OB_bFzqzs!MLwMRsDz zQ{NXlOfnXT?Jx${O-x?$u};5G81K=kV`$Nwz{zu^>m>g;1ty&jQ$0zvPJEL$f!0F# z&*^&DyBO zK9WTpX!htVqX_+DG+?eeA&q~(rn_bFDo{yTwOP-|N{v@^IgEiacnig>^ zxxXhS@5s1{yf&0fKTD*tM|J;RwC-tjiHZ&5|Gt0~@G4rUp&QDXSstg{oMSZ0dKc8M z|M^T(^p^cG5sjAWMd&uy3z#pRXG=_SV)LN-x(efdfi&6tf-W>pirwrO63&nF=x z{kYtk27O2S+`-VqBJX(J>xx_C>X2v$>@?IWOG8yHON!2T8<&An=v>(yMQ(rgiJ;`B zd1MyPsJ;6(TXLkS(O*>V^(5tQ%B>05RFXqy;Fo3w^=?x2^SCotdJDe_c00M7Xz}8i zGE+h0@jZtXw&m3x+qrriHnpA)JG}1|`&;a`IwP&8*}_3J)1oizb)7A{sYqCLYGIY= z;RChyr5q96rRxAM4orD!LW-RJ&IBm2&Q}e17gB`ZRpQ@`qCXz(?6@jNjrp1%ZwiX; zuJ)MEm%XX|Jo!!HDVQrQu#ZP!s*CEp-gxt;TmUxZadqZFP5gYqCdN#Mu!BbL@nsF6 zYcI(IOJ?h`Rn2FQF5HQ#`aA=^gsZ$*{AZT*sr|%6ft>MavgX6)wKi=?g3DLzJ zdhDf7+yln-i9pL-%L++$P5`bUXnULYj&Tl7rqus*IkKY^B8%Wws9w!Ev5XW}iBWdhoTS-G<^w3E>5B zrjYqKP(2C!;C^JDeomYMIaXu+OTFwMAOeFWGO_yplcVW%GNzo&#H z_7|2|da`%Drk^x4aWl@ zdt5I|M6&eR?@3f9H%$j{On(4g{=dgIJBxTb{onYi0D4)#=`@LyS!z6``x5Z4DgqeM z-=*#_dkMkcarg=0SGAJG<5Blh{ti$-&30_TN|*xu}F2cQYsGKp8Ny7N>7UPrH-xw08E|*MZH|~L#&@USGmV}o58wsHe%+QUj zcgTy!?U#W^xKT2pBOGG16q~Z#rfzVPOX_8ZI#BbvVMO0AVn#37MQVHA#*-Cv-J9)v zxkgzC<|=B&`xb^iNap*Ds{BHXh}XSgzq0~dxD^W%<=g?!_}UisLR?Huk8IV`AJlPE z2dw_0ukGs|w;r7x=~3xn^{|}`CEz2Sl<`~k$i`W#ee5nY7_#n3eq}a<{Y+t1Mo3xx z%^X{n!PVJu7JVuED>lRASY5k^|JOIe25mEE(~Uq0_0vMN#Qrv(BVV4nV8+Z(Lk=N~ zvUH*^9S^Wu+{vUM2)X!rbMrxRA}*zX@OUtedw#wXRJ(cS^-A`0{_+pu)BLUzalJ@Kl)8f4Bs0K0z}BuXDf( zBQ=_!xcfvVa}kHd$J`Qetvpm4s1|-zdT^QEvV3DtDcC5 z6S^Ee?Sr@flZCq7z?y^a|B>EnVxMT)2Fp>w=(Vs*fN5%a61!SISn_*XCx9g(LHv*~ z+p&^(-s4TN5ok!6KyBdjdRr?ED)@jl%(@yTT;)Hu324wc0rpvm(?((o^>HJ%iQ?Jb zFJK@S4!B`gOF#hOCUIFBfH6<o>C; z3QIsddlIl{A)!79h$A91odQn9pPs1XR4rc9gr6gD=Yzz~oVQb((s_Yf%i{;vJK2A1 z^OfA$C3Rjp+qPBt`F@_;MhupYUYn(~(o(Y+@5Ni%Xq=_bBEH|f!Nzj;A4+VG8)vzX 
z73y3akhK>9%uDJ_@Kx**!Fy@U6b`fiHjL@Gtg~`pRDwFL1na4;Lu) z>wre1Cvftwio%b!>mDjWiCHc8PS?LD88ekQbAl5zG%@U|@*+QVJNNCbRZh$9)Nf1f zq2?qk)-xfI$^XUln%VrsNK&L6M}2O~-{9lEs~mAP&z&@E0=nDk){(|;`PD<7m*y8H zmqV?!zhkEb(HDZE%U*k_zJb@j+OiH@;&Wwb*lDgC@pa6#7InW^uZ->IS=m`_w7=|S zQ`5a^oO6w>1UNsQ`wPtXhQJ&{0o9Svh$&OC#=lt-iW?mwQ&1B+ejA;9v7@tiaUIfW zB4SQuZzYIw;m7kg`C6=XFNJJ0?^*Gg?IC{LgR;gC2+CUO5L4%Ac_S?H67@eUH@#x+ z!grrB7QVD*)SrwuIY8&@Q1Lz$<3mC!qx!2V9~D1!$= z3C^A&EhVA%{v0wURXv^~!L5ZFY?jySM=#P0@zx@*NArw1FA zj>Dj2;30*-;4FU@wmN%&`6*;O&clw=NTcrrSeXy8C2t05eg0S{5ARRvS;KneWM|Lk z(;E!IJ;8XKl?Pv_kws~LA(TO1{@huY)VsSJ(B&I3YorVB_6aPwe{Wsi*sKOfU12zW zHlfW?_oi2P`(G2E>$M;lCK{}Z#I8%+ez7p z_Q}Y#-JJf;_7^Z**o26@%ZYr-)jRM9(C%h>-6OpZp$5?Pu(ND`VaF(6gzvDH&iNlv z(OoJl;*_@nA%+$PiW=HYzh1PnzvF20s{Cw44R_ve#{nF7gCVks#6#*hcWMg0Wr6n; z@Z=V+eMdGL;B^VoI#GGUpOj-2s~(Xv%_Moke6)$vB%3z1Tgl$Es^Is3Q&xF{Y(X}k zYZwT(r&XLt`vXUx!r@G_qkPs_N6KTlTr&To5xi9O;*I1MWlke$z=Yc~;fssqr1ZEK zvK~vLIWnBShL!0Aj16CZQv6Jdaguqnp?1(!sX9t8B<+i3Ls#@1yVfY#GM#O+mJ1!w z3(IW{?musRIuW16$4$YSO06sA8a443z8%4?PO8gV;l}UDEe{ zJb9>-uTyJgh&Yn!CNZtUdJSD;%LHDa;khr2ThYj&Y$Y9ubs|^8`|$9+k9*tt*Ug)h z#y8&$(Qq$M?B-Hevzx^Y++cCx#KZhb?5xGp*Cz0{c4n}@-L=5x)9BKind6tEKR@B5 zJZJh!6h5qPo9gE+{cr1-Ee!3d1!P2gl2J*+`S)V zCr)*CwPkf-b)@!u{b=rYvGSR{t6OeLb`4V!5saQF6{v6tlsWg1A0lR*4=9;+PX3v& zT6#7DSg3JlKszOpo}5wI(>|_^&9yOLje(vblTE@4hJa~Y(i!u0YurvUUIg~Y$u;_) zUq6<6sjB^aBisNL*Zo?2&p_g@L}cLEIxSovbm_Seib2#CxhLqzeu`(a3DCdtkOr)P z|F($3%s2vl^eJU7A~bb}!&#qNwmC6kPp+H_)|z`C_ixXn2uo3TD+A=9a~1jms-3)u z=pFtm#h`drxrH&Yq?ssEl?9>|bbSPmMf=rKCLC;+!Dg-ImJA$(}14mR}q!xt7GNA?X$7&70zW6LI=kjGJJp03RanMVVEpNV7 zi=FwRyCN2HkDNt@2kfWNWpEPeL3<-M+5*VN#5+!bAgwXVGvFKTY*kc=ZE=91@>9tQ zPK$r&2wdmOdQW$=y_9d4>^u?$HbeSt-YzsvrtFh<#5^}KtH+B zoEt>&WU*@pKm#<-LrZ+M?m!L}2I?Rm$WppA2l2X};5+kW6rm5wmtbnMx05iLfCIl1 z^}>R8FHjrbKLT8=Xi^7yqh}<+u;@l|j#sjIG;Su+LvN#_%ON9eCT$T@ z$s+U}K1u=nRFj!Q7wb0{h!0X8bAO~g=C0_B=;t=C_y$gPgn^M3GJbjKF&1w~owpRq znPAfM=}QHv`iqu7O0~9K5}{VPZ}u}fti`upbx>HVuvBu2w@)w7CXs!r$Pt3_P~}Lu zkE6bSc`hCWg&o8YtR2r#88C}RIIW`!K?(eK 
zzJ^gWYV2Lo7a3*Vcin#J^M4yryzgoaw@22Ut+bZgxPnlyFV2h!G*N^QjV(eGZpd`> zxZjr=ZBHc&#!;&VGofT&?X>};LX36eNTA%TVjt?M;$0bZ91oYE7n70aZUx{h-gb2| zs}BVC$h}lkevVlnAR?z*5eI;$&3Nbl><9Lc7oUqz+^1j2!^oZwj*Q@?du829U7Ee$&vZWmUzDU!&>i&p1X zSXu1pnwmBhA|7(!N0elnAFIz!h!wT*V~`1@UK`vG21YokLixV-NE}573_fdI#q(8V zsZVtBg{9D-gHK|YEX4Ia*#I4<0;7aK5Dpo9*)aNvawmBIiOl|CK;%rpEDYllP?C81 zces@rEbC9%Bnl|3*xg3GflZN3lo#Wr>O1^Fy$Q(Fd!0vjS)bOeNlmmY{!$8pQH1RK zl&azHL@+u=b*TS}O1^{7xtx3_VBSXn9CcrD1vEE&Nk(}Q%Gn>yJmmYy&Xtbt4DuMJH)(yC&lR63dAAyG4o25<$rwWs`xZz` zqh>Lo5^R{7*mpa<1XcuALhTvVrh&c3Yo+68w|pA}laz#K8v}J;TiEk2iSp$Lq3}iT zRWceQVk(8*CL&h9Z+1BB0=WubZNc^BAKg{wQ{YM*sTlt11fWV5kaWSCvj6sDzr5!eM#>{)Bx35e&H^&MJl{$1w ze(4zhc1NbSCEmslglDuEYV3Ehk=H9MX?)TqKJSlft}U#@`2hzIecV>(0EFkM=CR%q zvF$eK0I&KmlcL#9Ws=C7J!t^5qy^10BJWas0z$fqtkn93#hJg_gzk?%({ zdEO$t<%ABw(pQHI;PTY+cifqN6+F(DD|Py3Uj~YdzP^o?O?38ZdULFBww9}{U;Ubk z*L&AWzpw*+u-q>rdQ1VOkR*OZ=@^P?sCU7dz%J$|hB>RzHlLZLJvq(=Py#uCSuVD% zOMQRRc}H(@u4+h}oYk~PkVy@}MkoxS%5#8Hce#Xz@|ciHA~Ec>2AGbLFpyigh=s#| z@h>r34ObTBSRp8Vn#2Vx>2X_#I|XUUFTn1Rx2Ee%#r)Q^3XdV+l!QgX!-X)CAYDDR zj7Po=SUQ}gx#k8pQappbk$&eKt{Lr#aOWz9%Mk}468NC`DVXgJG$a0=4J8wjT~Q)T zCD|*|N#n?3D1t`Aj88x{xm?n$>OZKX_ zMYB5PL6suctLpoRiTfwfCb6RZ(YIyPDe1Nu?Z`L8C59iFuAwr86Kp3Yr{b>kI>Jr* zk?FS>ib#}62HDDn->JSY0nX*J0-hCj-k$p3$Q|fNpI$BT$;jbqqMQbxfwLSea<1wc9j<%0eFF#{<9ldF ztB~(nzd9ZUs5$eYPr8wdt7GD$t#gdiX76knVl?Miy-%!}<}{m$1auZ8p*qFZGWcLH zis~S)<&K}?(jULm#GE~NF#wMVw$UiC%jl{U4!+coQ%#bU<6LIHf)w{0CK>ni0x^I5 z4|z0F`K&nfdQ+i^$q7VkYmTUAPya;V2_1K~$^IyR+HPF7spEenonvzNyH+KUW@c84 zz3Qi9>9bQ-nh{Aa9TrKxR@xhDh<9xsHbCd$#m}T(xn-n4zQ?soz~v*S=A`0Iy9xPb z5!^afwDNZQ;_A&#`W^pKtDRhz90y-RhZO%2(abtOmvYepiu2ZJ*}x)S_%hs~ZN~1= zTa5+X0Q4p2r_|YjcKDk*>ln8z|NC42!vz@g)ig|co?w;iH!y}Mx#yN`PQG?i_~qKj z3!!>4y@c$xNxKXe))@93O?qn^BX0piJrP~TX7!(i_7e3f=$tV8s70mHW%`I2zy&v~ zFJL5Kbn?zNkRrM0HEFNOx)(>ypL>u){3;|Vy890|l7t*Cu3*01KXzA(S~G9%E1jjC zQW}jiCH)>{yTqaMi9#4yxb`u?p!4iDM0Fy0j~=VO<4*Il*+N zE6$&r2E63#Z4yb0A4!l&%7xO0%>u*ho0-*jVe*!rka29n%-PHL*Dg0N4;Wz~$uu@L 
z@2Gb0StJ7VoFR8ElPK-m?Cm(|>Dkc*$l(vXpmKctB0u9*?D(n_nN>=Kd`xbfjE0x| zoKjSEfILk8uHHmQJ>8G&CC=*zQk(h0HG&b-xO!ZsO*1W z#@bYR;GR&tqczH0)cb#USMcMMQ+N<>hKrfJXdiVD;`<-cleJ`BhNA`aDC!6D3lRhT zk<5kSih){g7k`520CW9aP+W8R!OmA#NqDDO-4vho5Rf>)#MDPDXFL<|17m%_5nt9f58=;#MgIQ5Uzsef%^_XK&VmQ0rE zqhWTRNb;t~Z{y2Bc%b`44;->NAh27+&_t!lL@MTC3k->wM4T3Hjw#ZXFjk9VK-NGp z|Ki!rE883!UbOC!)G;Quv2s7!NE7O&yC-R%`L_`3+5b={yMG+Nv*r7438A+H!m#R3 z?z3GN$zS6J1CV~q%fq+4^B*&j?F~Vb4?pMXf)n=U?j`dz2t0qe)>io8T;-J2Wy&+2}<`3kll$`#=e}COq zir>8kG3~o017&cScQ%sUX&)&k%71$FB2~e5KA#_Y@}U*+0J4M}5ZrInX)*KL7|Fc{ z@N`9WsGifX*vN7eb1sDb6z^*$cue7wuo~X7ia1^|=N6{0=ypI%DBfQRQb~=coXU-^03b*eaSK?B$NOESGmwYupBO-%(3$Qs7EPr)Xsf~VCnJrSzNtm3G z8|$55JCbK189|>5mDAA&Sbq_H+%>!!tHIWrz0*-7fbhnUfignq6-)qLyI{BUGtpi& z4n_E4lyYUR)hO0@)yTlu5=IF*>1OTmr~Hw*^l*=N&g?YqI2H{{DV2W}?kh9BB6bje z%s^B#Td5_0)7qQP7&hVf6*X5;d70p}Oytmd+g; z>$;&BP8!PM|5lWsP3MrH%QPag(bxJ;RXi~Y9LfG)r3pt(WaN|*TFF4!7kK-OV4I_a zu2zcLo$lJ7(`jgRKnO2-w1-;c@vLc}{+*PZ^=Mm@m& zl()FSB(ZPMXKb1-2>Z~6WNjXB!!-H z(>qV!0DK7pKrq(NpB>P!u3>b`Yt%vT$mA3u9=`4qpC+N@q`Bqg$gDU9|6GpJkUeGK zT6j-~ZyE=X9cbuOv`>>lBL~lWePu>-uH-ML<1Vz#hSE~^orJsC5vJ}Bi=JbxFw zsnGw!d0>NGsBDsf*T+%}>!U*0yb84XGz5mbt4tsnwJ$1YF2ZLLTNlUMHns+)+XGUByChhWI`)$6@!p73*l9SVk2kcA zk|?E>@SS1wOGv`MJtyE!qBZPP6(RDT%n=qE=AtJ)=KxlaCq8_y^Q>{UE0okV%HFnL zSQkSa<-cKBCk|7ESD+FXY!iu-XRMK+LuC2bU3Z4}6K==Nn z`Jwj!o)c3Nldn*KeF>lClGDiODW7a#c=Y1FWFVZNndlyLpZ1YMeg@>=a=;`jg+qn| zLLd5~w_8Jd%3fhYb%HBIzu9cLqRV-uHt$fJSZGeu$kJ#L^LDevVz-nu@2PKrEI%<* z<#5CaQP#nNqMKkIgdtg!X@vigR`^Z20nD;ii@fEbI-K+K3Bo*Pt+lij@d0#=GtqL z;Xx`P_&zuQ*Z%*`>Ng-Qa`j*}xMx&}mHZxr)ikQtN+QXpocvkE{TCPG0eA5Kt<+V) zQIwotsi=o5fR^kla&f8pkF5kWdZxicbl1}^Qa5c^_^#&gs2OhNY3=6=yY0t5xaP%T zt<idZTbf=f8e+I90#1$jl`CqO2G@kI?|6_3232wz`#Zj$ zwPqXRFM6cRo!}Khh+&9q3wP(J3#EOT*XcdMP7vb15*MiqTn3>-o?pq|rcSB+pt7I| z2^&(ZQf>F$UV8OOrcw6%SD|3AT3o*UZLeq1yV;0yMyIyNWfj_HF70<^gX#_xIC8(zCfkmi!t?As6`}!YZd!g z0t_$i80C=_24bo|<+LxkJ;<0=O+iYxw4h>E84onXjovAYRi147l z6u7>X*+DXfM>48GFopNFNwIkQG?lo@WEnM}QHzoHrs`_!5oYa9JelTi^BrF>pa$l~ 
z(xSpl#f=by#B^h!*K_nR7wyE?9KZ%H6*_vIl)D-(t+(*tQ_XMOf5=KjVqn;ck+92- zk0^(a!$i@f1Iz>SLt#Xi!C^9?@aS3k;(EWWB!}CCi0h=1q3WuL`w&$OUUwYIYLE1= zkA;ukL*gg4I>LT){r0qd(J#0v91rFK_kFNDNQXi8N>V`M?H%4xp~e6sE+t!oO_Y{Y z?V7_g79HQ(uMeVaqwUTQ*=xRfQuYMgA;_RDf^Li)*pP+7g4=>5pe1?8I*w_UBlI!) zx{K5TJnbb^lvMCpbmqb1o@@J$FA?q=TX!^TH{lQo9Cy#Dv|8!1H8OCPs?> zxB=g5A`1!LJk}!yiPa_T(AUxtU`Y}5QQRPUSP^{mF>O}?-4M!BH9lr$3lLYXB}fKd zYTTx2BH;xu(_|-46TWelk$8wQM;(CUg7AwZR`E53^sp;Mc|zbc(Jdls%vYI@!IH{M zVCE|U77(d~cvA#yWv*NJnQOM?uG%ZEnkui^{Ex9wV}|Od&IOfMszRWYQbqtTr)R@) zPH5a_6ZixLjm{N6tYq@5w#(>%u*q|V`7+nGp$=qbYa38xqi^lw#VY+PyDKfhf2MX^ zLS`U4%T5z_Cde=O<~DyNc8N`}o&U>aFb=Jh@mD;wCbI!qAEcl6wS$%(<>}=8(R!7aUTF|z*!k4zOGG5U4C_FogV5zxHf~TnWF0#D- zLvvmrf^7Zs9FKq)c~^K>9Zm6f?@i4Q)CjDo;k?rdu)FeGvHb9eixR(9#kuy3bJ+(Q zc902JGlJA8-?|CuM+pr?m)~uMXaNkZ@$iOcSB0joD#vcqhU#zr5ymusl%`5|Hlto3 z23?wP^;UyS#@{-s=A=GH{@c)$zOv7+L2NXf@n=h@O}}>?SZi80ZFj#O=wn456PXPL zYtOEuHODkFZL+ZSiIyQ+<(1cH1M6yS!cAby zxaV^&Q3>mV*|aevNP?>8=oL&G7s->m#xC<@ASA59(3KGwfrt0wtDDeY|NE`qRqs}k%ZDtq`wo;zyU5Vpb3I2cBp>fI03no&Y#>YRu#_)^$as45k9T*(E=tBl@@ ztdkSN%7X3Xo|`4z*B2{`Z$F(FYpC(@oR|TL*uxf+?lhkvvd#$ zx*~r{AV6M8#b7@YeMV1xiQHx@h#wc*49#LgWl_HK1F!QNWkd5q!ZtHM1Gyj@rgT~z z>7t?AqIxx<^%-`vgjoe6Jy#g&X4D$E#Mp`)RM@mXH-E15-iMYM= zCtj|1gp3YM7@ovNqYnb8#!iwV@a$5^P|!$HpQ2&IfkFU>i3u_U8`1dWEgP2tida+e zG0&Kwe*}j|RBYt_NA*!?U47Muzi3Vcvb85j6PhFto%nShK!Mt@|O-06T>LM2Xe^TH`qHvN7$^Vy;@ zY`Ia(69kg_o9aF-q?wnkshOoIj3^-d$+7=&Q&T;Gf7(cgMTV`OtAkVW>z&xlz#^ti zFf;0Qo1k$GrBndY%|$>Fk1A~aCoIre8C~#eKxa?x{;bVIbPw6+=;hxrHH**lt=mm) zGmhbN`rM60+JQCWzRK#SeO;mq=ZM!0sp*mttz0ri2N(ODPFv201|inm zP;{gYlD{iFec)(0AJhlSraVH|uPsNiiWvu@$S_B>rt(KWSe3<>7;2AJGO;+LL)7 zC~&df3hKI#cwRcKF*F+l8X-GSp#sHWkQQ2dT7lB65V?a*nd(W(eGagj{8UY1kw^&V zl=!|wTWD}W7CRsi{$av$u4tc zY{FXtk}a58D1g3I56nh}e@V|Xwd`BXdC2AvCzXd$0T`EOFrzF@-6wz3SZ{@;&=zo8yn$H=ik`?B#SSChX1FU3d7HPa*;171}zhL*ac@ zPAG+QJ=fo)9C1@_g9_P)kqwur_8e^&Fr807G#(D8K6;Gs7Mg~i?&n=~WIpG)07wC@ 
zuV;05jjC+Fabf*)l8-qOmvUE?L+>B3XOln`t>Zoh?pVD=Djx~#cS96xH*(xXHWlmo?WU%Y?`Yl}l;N_*O%J|{CB@1&se2=&2hUNw%%8FjC{aMLl3;>+ z)W8&}k7jL$Peh14hg&8?HEOD@PydE_Ku%fssF^b9l02FuQ?o?s!qRjo`lO8Y@KJ)| zb;MBk)CqeH$(fb_{%9VO=3 zwvlTm{2;a_h^T%StHI5KuT&81bCmEZz5)C~z-;4-j~d=BI+yDZ*2#uh7l*22AjT*& zh;&d~uRr(?TQykT-jeY7CjyWu9skh}ATWK*=D7ym4R473mm6y`nGLwy>5SNMWB2&xD5qABV=RPJA{-P={1}gdK2%J~m zfc?8yA|pxzmH%#9c?GZEDJ%{ismus$I_8_9GB4qkro`2x~$UhKY72{14pjaKal&H9!<9JPQTM|bQP0z z{VPVpa&|d)r%JqUsVL^bT8!;F@+QIoi-zvY21Tp%xaIkoRu;T?inDHBzp|lmPvF40 z=zLo;IOs=~CV&53>PKXE|HrrB+N_#xFjRMA8(0W!GY-EbF&zAnE+?{n&()BRw^&Lv zX)aasY>ykx$Ta1ppXbz|<>-{5tCKIoj;nu|f!cGQR8j*S~^`V7EBGGDeHn+LZ@8+RZ6a%cGR2Z3a`Y$ZdtZ zmbwZonwy42aVoLzQ>u0(2bb_{BH+k1MfpjBZ{E@chwl?7BWS!wIfB}xOdm@1QMw-~ zlyiofpzHV1MzPg%>+T$&R+Yq!95K=6WsB3{Fvf0r`GnnzM{uV%$LBD|f88w7(1 z9oe(@`*@Z;auHQ;vl4MIEd&h+Trwj58M1go7(0I~EqnlBw8N#559dvt` zOg4H(xPaVUg#?EsRz2>W$_qv9Bc0L9K`$ODFzn))cQA+{)?S+?j^Q)Ux%=hbiJ_ zAil?Q%aVF_@uh966~g-9+rg*oLDURZ>j!T*Jb0^56HOZ9ZUYL6#1!^|)hGo#ZgP*I zsD7@xzKW!uN?d-`Ey<=AQ5DQ1bwDm4BN01wnJq2)z+92*axL3L<67Dg{X8^lFbU*hX#d@?8?S+kFVk2-FRnF_7Rn(d8{NVabDs@FP>i%7P(RlRry zOHA?pkOWYn`h6ljUDwlG*G3ap8JPTR5jO@=jcT$1rOpgRLzaH%j+U*o!0IWLY?kelyQHl>WGRIDZsYvZ#2r5`BZiTbaoUxRC8Q)ox}-a#q(Qo*;XzWmLAn%>?ixC!>vxUM^L*d?e*gbktThTa&VAq4 zb-vU{D&alR4elqYV|wrls}VJ)VJfNj=RXpv6jV)qy)>!xQ#oTwyx zN;gEk()F(24fWHRA~bKnRh1S@&!5SVc5l>?qIXcf;b8km6Ob# zZn52`^kf4a)z^d(fv-XEy|0St3yzO21O=w-|J}_FvXTuUBQ(dAZDV*CJ*8wvEs{aI zg*Ehr@dg|W|3CyO;@&NW1R^f+4Eiz(uzf#oP%{4*u9ERZ!EFEze~+XL^QSUfF~s7C zCfTI51N!P5cg#{g^CorwP`#(v=GPbMsO>cZ9x z8+{(VHNlGew$X7#&9uQb^XjS%6V1iFZ(%q3AcTx#Iqj+4$QyF`uP)MO&4?#FM$R`$ z4^}4TuFI`fp4FBst`fJ+Q^ePFPlg#beWshmylXdF`&gURYe;8zG^_MWGI>2&VG)+{+?{2P z)*XyA^%Vuoh81~D$;Xr0%_y|Fk?`|+%zw*Eoc~{WWkuT=>7gMzIV0Hn^&3&Vy}+2o}ZsqE2y5Q4zi;X zL4w^qE@J{9-5xKo0`rI7`o2&kfByLM+%9drk#t^sy%NL*E&6+$x<&{b# zJLyP*M(GsuVfkfX+AH@P#^d{|%nqCHQ_ zOcs>8cXWMK2%fLiR7t(?*>>l6r(&jzBoiaGhva~!=&ORKd@bW1y53;klgN~{a!hr;?3NCk3~XIBYj z6FF-@sTZRhu;U%}=4Oo=BF{xFpH=>#y}DwUi`|)j8jZ=rn<#Lr*g_WDI 
zsfRMgSS&Hu?r>kF_SkQ?-14IBNZOgz5R(Ub>S`|>3x<)BW``_~>ZmK9U@7TCxDX{) z?o#~|D(DGXVMAslm1xy;vHrJBnylw;gRG)oq?7rwo@sNP`0jbS?s^c$!t@9{k>j3I zq4BaiOxG(*zfD)SH#iS1{fh0@zRT3wce=gvI?;aH^n$W+y6?*{cNvvP<2z~L>?8|I zeOB+N0Vt54enB$s*nM7pgDFebRjHG|rvwg|=92Q)nmSs(JsR0qH9zivr4(a zQ5p-$Xz~Db*^hQo3~S~8asdoLgWrxyL^C{I&r?(8Yz0O5<8I6_HzSLN*zC}=&wr*Y z!z3W2+!l<0q08p^s$2+J4I@uaV7Q?jhbMu`McR381bp*9`oxiw9QrLNfXbtqz{_I$ z?qgqmWq@C(R=Bii;Ya#Kq7UoLZ*uz|bQ^K>^t^618N{zGT6loLot&dnY{JYmR@emp zD5M^l-F(?a&5^gttT^WOgzxZ_qUUu(DT|Avchp-3xvA*=$V;QghU~Oo#TUd@v_6IT z2g;`_Ur;lBuy!bzxz)DRvdE43+2pkocdvV)BE0?dOc;|^Cr+NDi@F@oYKr=_pWLkK zf2Spnl2!^-pXZhYKS+`bAk$oyYO_wHv8UX60>FW&faY*+hrcZ88^+7v%jp}10(O@~ z*55<1I36{yfI}{i#5%r-jUmRF@(HM7Qkz$GpA$z=ApQ87%d{ucA zQPA*nXO#FIHe(5yrPk!YfUy={;8r^RZt?)<_JdqgMbQfqG=?L*&-*gfHqgiDg03gsmuHB;ZIL%CnEQ~u4~jNv~P|37w^3o?v!k!oG8_4 zDm?ecLZQNMNgB9s{mrTYPM1C?y?Hxh9u)JddBO-`7z!63v>4X!VLUqB+lz?TyL4x9 zFu{pt8NX5IxrcQlm(st*-^@1;do=Ar6oa)Fxi9@C-7b52ay&EdvMX|5w@YYu>T;!5 z>+{OmX?@4GU0*C;d5a2ZDXl=|u$Sk7Uxs*H!zh=k+tf6|BVL(c&e3tDL{KbJGnKcm5LQX~iH$tZz~i8kf8zq^N&pjmtE-C@S9j*SWZN zIaN`-k<;#6jiD)_Fe2QHHO2Z znM&W8-m4!-Aj1UCcMIK+s?bQg8BSNR!SBVw5RzX!o1-EYCT znZijnB>{yD{-!-u>iwr~tDn{7zZzuI+OKp|L$I?Q?@6|$3)pCMqvfq#TBbKX?$f>a zVMHFYs+h|zd40S`xy!WbS{Y~XWUJ@PSFgU&H`CG|=XQ*)^F|Z~kUyn!A;Y>5Znoc~ zyY(8Yr6lkK;3l?3)YOeTTH8BmA`4}O!iH(OHMYTqlH_f@=7H`dLt%l#=z=}P zS-bMHp)^5wbK_}kOpgZ@NS?f|0W2<7k)s!f3O$-l&hmO7ZptSezmrVZSYpW<&ap6z97_mfBsjpp{xPzhvp}RgYoZDL%PGCNUcN- zsxy4!pB0kD&uCEi^zez%aJJ0bg1sw%jy@`nD0-_){3{a=6)mw_tWdAHiBD-%g3(Eg zo-c7fDgf)0e`OMDtRsoboZhGfj+g=_1u^8eJJs_^c6XRsYfhulRm%)sn2s0g$RPyea%TI;O>phXAunc$H%}fX(O=A&~`T+`f8(beSpgnN**rlW%D3v^T_@adf z$u9Mkd&Gc_Cg}P*5+EAWMoJewR<&Su0UN8?`CzFO=LjhL#skzx4#-z2RMC^L7R;i_ zoN+QJ^<+N1Mz^b0Z4Othd8<%HYuXd8sNfv9-GOG_md@v?+WDYyUOFb-M%taDk|ECP z`{*Yr&&Ey@gwfAy5KIPddPZYdFSYO07Vmfvkv)bQm!IvO+vbb2V3uIrb{yl$`qUS8 zD|qlH?I=+SQ%7zYbxg~j%?g*qX@-Y34s7!5gr~CkKoiNZuFt^mKIS(4;`vP9M6>xt zCe^L3*CLctxc}V__$OIjx9}L|Y7U41am+Pk@<0ukd}{E>+c*N9qpr)}O7a9k#miT} 
zEO%DUn#5Jz?DXJ6d%woKmhvGD`#xC1R2t#~{AfnjVzJm{yg!Oob?+2v+yXWKC&0HP zQ)%qpq?R3js~A?9)L%n9B>U4m6me@q+A)#EAKtv6!tnXbte#Y)?Q3wR z_vAS&V~*P&urV<+?a_h-Hl5}@#_{1E7r%oi^fk;HoPfi5?uQ3HZ9w9SM;c#4rAVU} zy4{z|Q=;-dU#%co`MKzyXz+J-$b5v_dzFz1P@kInSu=#QQW8=n;AuzsiU2XxkB5d} zem@dS-b?nDejrov1r@W3U54riTNHeEy1Hb0s!|T9v@W@9@_Kix-frZj3s^5#f1!)B zxyq4=kqoC4wxVTtSs@DG-0By1XYBuqQp&SBj}kuZe(hIW1U)*X>*Kv@!#2#96ReG8PJMF6+>xUr+X%qx~OD8L3O=}^c`h={M9=f>2$c92C@L2XEzV- zH$6@=dKx_+l~ckf<+NwtS@V-m_l@DodgAgSk0i;4*UGpGHr1Up3!CgEYLeNLK6ALy zKwu0la^&s0uFcM-Z(jA_m|-wx@zhQb6z|rl&((4H&)!bwva)ea%Q=}oCgK-Gn=E0b zb@}Ut=rj4a?c5^7(7PRUaT1THvN)B{1pgT{^R%MW8{oN%t#2b!_sp)pb#u?9xmOcG zX^erCZ|VE?4U{}6qN+7EZ{c6$EhWf~b+>B~X@p~%qmqo*iyhH$!Zcga?=OXxy2EGb z;Z16J^au3IZ}{zgE0FPBeBHw9%bEr1A9gd93NujrSg7a;c(Ag7nXx=4@?l=QFCdR%Bh;%{k5tw^WNK2$l)rSq$8{%1R@~u0aL=$kk{4L^SWR}EVjGR+=+=C zkgQ%_z{1P6G$>*SB+S6-Ao)C?#ncD4GAh3vEf^Ch;l zUE|?N^ioc{ToTOLu5SaSs~uaSa&N&b;JL~9^Oy6D-F}s{pE$cZW-$`zUr9AEo~O2; zp<&1UcAnR7_3XaL3%@Fxy$$&2f6C!FRYcC-G0G?0ol22F;L$yW5`( zZAOxJhAw%Fm*szS75bznFH4tpcMO)X%+>kCZjAN+uCU#f*Zg@(h?#fC?xbIn5Ig$% zf=2bQKr$NfP^w&afCz1L%VWP4GYp#*B5|;E``I+;E4(*KL^*@EEorwS-$6m)EIT&K zDZ+ebzXz8@SQ0b{4Pu359n7}nY<-%hMzt8j(phMe$H}5!&knJoE~4MD%z{G1QJ7?m zh6Atxg<;m|3u!2ztOJG${uHDgfWxXli28omOKEn)L0||GH6|6VYu=WZ&|XNW0_+ZoamU%(C1YuU zBUOVmrlZ9f7TONV6mCuVh@Ed=Xz(AQ0duTqz{^fDmo<;usLcfXA~+0X6C0Qh#3JQW zLh(VLgAf1xj~12P-naoqR?RH`F7W&P>Awn1WeQe!pVldXdl94vffgq7+2>+Y3TXo) z1=7@*A1X*#n+6{PBK^$wFJFIaP&W{tWa#7?A!V?7q<=w>Q%P-%-+x zfebFy#cY-JywFi-AHujdHVXJRX|s9wqIT1-*4B7lUl9Fn#c-q~Wr-v+iP(J!t_}@G zm2cG7%T!lBLi)LEkOI- zzpjPF0tn#z{+3l?GIW>L{b%M2V~|SIs)Ci^v^vNE*mH17TdJtF`a=y6DHNenNP3o= z-ThKYkWQK##=hB+=CVdYxFRtb0oVX;lM<0x41}J*(>ZNQ${_3HlqK$xf|=3WbCPw( z^}(m_)SC&J_^8EL#RVqm9Id~p?7Hz-sx)Vajzh*iQ5&95I+yutrRi)4!A*yhSJd=wqnM{&ol=^6ZB3qwIs66HQ%GkxIWud+ zzAyD6rQ3E{P14ri47f9~g%IXqww$MT?=w=x#;73UhZkV~h@BYVH@0@`0s*RAp%o1( zEGs|Mb-z6OWq54Jp@yn*owqs3*w zdGeuykH|9rC_0-jx){3DLPYSBj1~-`XV!B9w7LE0$3FU(`nc|RUk0_WBKKQkNv={K 
zOliTSbs^4_QVRDR>X5P?E|^(ve?lacFMdl2(CC#FZ~JRlA3~nMND}gIX*ch>^@NbG z!E}UvNOi9(f-;^(vm81|r90PV8j9x|EI)*l@bSI%^LRH+z$h{%WO;R^yhqW~pg%p) zHUO^v%tUS9GJMgx? zUiU=71GAOV=eoO&?l28K*Dh3JTCT|y5}?f>$_fyWg^z0XrU}-^D})|}R<)+Mq&=4o zki5OAk|~d8B&a~-?cK`kntFaN(kfS)Y6eOt9Awc(lXT-);Ra?^f$c&!XNDWI_0_h$ zz;xy*S^Ww>m@JlQQSK!ZAJNV z0d>g!>>n{_E+XZLi#j*WglKh8&Qm1+_yV~oL5jMCWjwqBA}Kgnx83jv#-}0PtVqQ* z^muN?yO250OUn%~h-NWS+RE$DK%7k=fiFB7jhBZ*AutiC#-Nn;%>TJR;q5f#O;viT zkjIq!;H`Tcpv=Xn^v-*FhTLbO0kgYUbvTc~CwUC)R$A})QqT+#9JhKpc?Dq4g3Um& z<>=#~>o{qQHfeHsa42jy0wjz$r)#FCoBcd6CLMv$b;iz4!pyYKIiQCdoGkA0PwYho zFUb73Gk?^4dD=2GtsEo&Fi#%V>#Q_(!6o!-igP8$+w)LmXj0L zvYmE^>j{sM=4-)y&eO`3o7r%teb)-h8IQUWQ75-ce#;#$Q{jr~iCfe5%8vQnDxc|& zF?yigWe0RV?iDLdGZ>nXV>NizhSxkj^SK|$hH$#%yFF@m892YIIXM5j!z47=u&3_* zIMr#TC>y>dTZ$6z7qoHKCIlykFjB0kQ?v!|w%)5IU{Y!Uabi7o;I>;UrR#O=padk4 zN*dp7s$0+2{VGD}hRVCq+5!`g%^a8V>PaEPsD6NSuN14Pf=Y&_mN`D!g!GyPkot0p zG=7(SaK5fHqRhgjGQ}@VH2Jdmi~hrXaK`1)WNBb=eU&A>F67{bEK>xbD?Kq%N!*r? z0sdR7G7CBp;Q3SKQ0YIoEB2Wa%B2KK+g_4LSHuCxaZocYTFPHQ_vskz3+LZE6$KUT z@*#_c{uOin4(OwK;scBdPiu->2A_&~`y7$ZbJ&T`tQ*QQ1aQA(7I~x4c1vkivG3w) zngjyH^VD-QlSf8z#afk*>66p;wj%~|BepoZT{tAgjpqYtQcUq=FnusoOs3HZhOFAg zZ*Z&Pij3X+?`7e6;z_P(aZFwdPnQaQ|H3sLpN#Jilje?V8pPsA#YY;#GLXt&eFZ*1 zZeu(p8qrvZo|mZi$K)ah!nhRs6tCSOG(XPms*aEcoUJ%M9_a88(|XtA%_B5{7N*FA zfPDXKhnOiJ%kBBmN?=f#=)->>YtRYI1APX$Gr`e{W}brKk_532oQuiKz3`jxyLX#> zkKMMuR`-Glo56d}q1e3oUZKKOM~8J=#hl~sEKqTc-rJL*4Y{4pSEcJC8aq&<$%m)I zaxOKd?EQwQ`eywb+-AK?*T|N%7VPEpM}*IGK;^bZ3YJs5$(4=Zk8lqdw4KZHmf>aU zk>B2|c`)G5uGG;@Sn>;1qf%gLDp8|iQ7zHUeL7I)x0qmqf8MAh!$P2+hA_R%b)I}7 z@on;VhlS?JVW8dN0<88hJlW{5NlUeQ^3~y%uUF2_aT&IdlEijvOO(?7kGCa`>~C?&Y)51%xML#nd*~LplEwnl9l3NNk8Hr9{3%SG>hlK;4E9G^ zt(MO4_Q%T|i;=GXX{E}D<&; zPA03Ez1zSTwFkd!SKggf^9G=kl_cg1j%CK%dJex{9ggdHbtJv8CNCIc4{0Dzyx1MO z!?`jGVc)@{5Lo9sq7eIhM6VVgfM~kLeSos*GCj2Iu;}<>YJL`XOJC9c5f$oz+(N+g zTcu_nk_}#?R%*7983mes{>w^|>0Ob#Zo2y0U;(3{Ir+7`Qzxo1dhr z6tUw|KJN0Q+&(fduI-GCy^C#Nk9mxKIe0C2DlePyeZ08gu=gmoY^IR^B(Y=GY2kDx 
z#-#~2XOM6jA;4VY$LNQy6>xWq1+d0laIqe~Cv^^oq{#t4lUU%|r>A`KxY)!f?Ni@p zW+Fo3ZQSk3p#`6ulI?kcWLvy7=YP=v5i_X_aL!mGSp@M>W=DDbD(gO~tXSwHp27SP zTOp~6j*}vaH%k%mE2;XHXz2q6TY(5prypjeEo0wrSoY63*`k6>Y`0D;x&(_&*7b*e z>84dSS_I+^UC7>?ka{;lq@3?Q2@anKl>}p; zMaEIw4fU9~as70lW>etz@1mq<-dmoLRn2cS?Ji31ftD)$qNqQ&4cE@tj&j zOXppJ=#mAhGVXSErp%&}_~5=JThm|lJo~^bCz4;$$loWh?=-casC7sU=;WB^>sb-< zCsz;DwHHmPx_=UuK2S*9-whV+6rOgDTA96Q+#XaFOA=;O9gZqc2i$c3WdGaU`n^zj z(!qOUQzA=j#C*ib;*TTymr24Fa`k=eeR_qg3?dx3*;)o~1{{V2e_neq60ldP^6&1| z42k7q#I0bBS!?^#%@ck=S8SDC(8!a?-0I4^kgW`VpCEt9bL^d^ zO?(#?_P5r_?uw-AGCJD{j8)RXEx;>a7~8L;1PK!~-yfnv`@v%1lVo~g#*leOsS?@% zYJOmsO1$WGU*ww9cpWNcRIuw_epdlG&nbXoR*-KQ`}txX@Q$ zL9Rzt40&AhSyTMj(4+3;to9Y_9lxyE8@(pOxz}I%$)D|5nRuBfHZU(YhMt&;Qwi#wq|mdJ4c)$| zeSL+oWl)D?S-0H^FA-5*zpyG!l37!jpfbKa)7TuMe2N@lpE_S}X{5%tB1ltZ;T>|e z&HEnUvFJp&(~ytZHwEn{D>HlEPNc4fs;1Rb4RHhrVuNy~l?oA+6 z5!&6R4DqDGqTIChtxJf1!^Dtarl_5M(1`Qw9_^?fY_F`6S{fVsPO8c7LY2OvKy}vO zbL?5gFlF4(38B37mZF3jvJb6I)gIqZz1CpsMxT_{>KZA~$emk#neLEVrMRWow{m<% zyTk8Sxma^Jx72|}KGFHS-Da-#LFEjn0><;exB!bU{rPlUhwUvMZq)~ch1>r0a-x|M zV5GETxj6Fr?_ni#LKh zXGW#Vif`uL?C=}txA+O3K&+;-)AdDvoN&g{&&mk5qA^PAiuv?i>ps|a8Z=3pwxry3 z;yL9f4Sty-4oa@-MgDMp!+R&GZnwXQ@&nA~FVW_4S_AEc_T4V$jON{DM6tO`TRn-+ z2A4b;LUvNS=wHBiVfiOpo(yF^%|5*?C)e#?G04smuLrMd>xEBG^;L3YAGAODqPYB7 zFU0aEeEsaNk|qv-6en{ZHwtZH6g)ygv!@3U-IEcZxh=4-5(65(tN~fIpq3LGBZ2|? 
z15(Kvxh7FyIq4oJi7u%A8m9(KKK^^ZTx->u86sHDZzx@=ykuBM#<-bKd~9O&qp+hq ze4I??G3*68=yj~8r@48dXw#T4$FQzg_N{d%Y>hf{k_gv>?yc2yCvxU7nG-&0-TDa0 zT_G4`EGgvknPhb2PvPa2WOJ$tPM_DbkNV;ChKh-De4bJLiDvot4~ukiG#dQ0XX-^7 zFtwAp9p{j(Q~mI7H7PcZk5a%&rRFec&)mF2i!aQt8#J-R0G1;cLtDa>f zqc~!6lz-*l;$wrS?V(|PpbN@6d^Re|j27%G(&&0@1Y8z#XLJmRPaVgkdfy8H(YWBB zIVVve$j9`6eEWUkCa{Nn4B)vKpiTXV?-A=s7p0dpP?Lagq!kr@53)Hd*8^DfpYCU* zGgRQH-~}bK)EVmV;y*OVKW~TJ69rII>0IEnBMS^vSAkabfuOzY+ks>rB`}7#tvNY* zs`%l~pPVQ_r3}=$z8K8{L{RWscs*9-DpLnK)_kr)at+Uqhaukgz&1So5X6gwrp#$V z@+WNvxW=7Hk-v})W{&8W2uLWIE0;|TEwlf=8hjX3gTNE8mD2R4nAM9dfW*M%`^xcj zEhe5{{Y>|oXP>ZQ<0`aS{~4ci(LqwvKVLY;%B!yik1=i4kVBx{+oJ_BSX4c6Pm&Fg z$n`(_{NERma%wg}Q4;5f>X`#W&hyL8K;6!0bF|`ou(4QwEk()-;Gu_YdNB6o9bR5g~(O=%1x|`@A^z?l1e^ zD{DuYA!=iTS=UwmEaRPhCgjPDX{Izcc5_st$2r~E-2Ttq|L0BreepShd3y&2?G5LA z0zPfMJOZQ9c~*zJv-JuXg-9fc5v}K=2yiw=3Zv6AGO}BI4}u*60bde=G}oj5rc^Xt z9m<)`HPi;H8MOX30RG3H!P@Ve_qqj!=N3^#niZ~-^3(s^S_ynKfo{bFHch5u><(%ufC`av4YE%ehrDt8j4|vkWUlfQ|UZ7=%SPZ4p#%D zQfN_0Gz*yEv0P{5SjIq5A9ew2L5(;TO`35Xr_?-;V%xw0Cb$L|F7yFE zXt;CS^JmCw!htGR5&SZ`FGSET2|DnsjlZL*7nyPj{;Nr(53qL7E+K?Vxr2!;w-0TKhrgo&0ih3o5aj?n(SITHI~#$QuFL8Jm$gKbY~Wt_e*+y5`N#o; zv2tK8%%oYKo%gmxqf89R4FTYLNq@SqUUA*#W2I?R_}@$3r(1NPtc!y4IH?H~-)b+~ zysic%d-T>-&#i_PH?E_+2!!myGs2Nd8vmas0Y-DqfS&Ja<+v`VW*T4+>nt$ce6Ie2ss~>rByG4Oc2XJ}evtR3+KQeqO@!{h z@qY=$IY4(P*Kb8*dQ}*k=kZjpNgMP$R3@9fn)}lPr;T19r`eIqTY?A;fctSsZ;N*W zNvoPzIlz%R*HfT!CF>VPQ;lTtlT=O5gz}ejXW81{6vex~{-5oUtp#EH>-eV>We>21 zdHDq6`N6uRet!b{;b9-E98j^t`MEMaR;***852%%K-3?b386jf`{`Qqg|0{=$LFPKXC(GlnKq}d@Su; zc@m{%-MSA2%5rC!pPmw!eQ^Ftw_RZW?~;q8g@T|X+jPG6=p6Xi_`u^z$G890)t*>tb@q=z9LfcOFiRXKQ-v0*5WnSfHebw4dqbQdle z8VPc6?shQ4(Y768(6YD!=ta}_N=rY?^j`xP`UE69`BbbI4g0PsJW!k}QzrUIp=~?LxcSN=F9SQ4~kBMS`p@H|K~P(8OFLovyLXat9=YVQ@ETDV5A7 z-vY+0eC;w|LDBcds#sb5%=}2bxcsshu7dF;RpJS<W&1xV&?f z=;Po=%p7olQOVb$Z1n$$T+&~l`)qQ*1uNqkdhK%{1fCMhcewPcH<*CtG?T^g7`Q>q zK+A)D(}VeS5s02p%^*$Lzg!iBK3OsV`X~GFiIMuUoG4w~p(pT@I25dk{niRtT7%5$ 
zpPt`2ymgwf4LqmTr)3Ilaq9#`f`_$dOP^LjS;3WfKuZbaizi~oU&*;cy8Q$F878RN zLzj<#h~p)45y-gyces`Ao9-{Y`=B8Tc7zy;2My-2(1@wW;E-$|9s0`YpQ;ZO#`v#f zY58Dhqpp7rcDMO^7(G!ue*G7RtQ)sE)t6~R76#__&eMvT=Yt8v98GEndetK1Mt{e@ zrY}XnAUvhM>C6ADv}nk{PJ~~fYyitR@1(A77NAAjKN@gqp3Gy-y}~N74@iu=PWJOV z$Vc-UaDw3oaO)ZW=BINGuUa$DWk@lC$oohgt;zT9yF-_?zX}U1`LA@YH z1M{z21X~|xnvIVlB?c*{J&75Do{y3hun%0#Fk{s^#;U5LNoX1dQ z2cSMm!q=mY)0RB&0Vx1k(p1Xab-SrOQaUR9tJsdFNgg600+FCu~H8S9LXC+ z*jN>UJ7}OJbZ+E{!~J7g3d7yHa!b96CoRp`FRjmlKE9Sx!Oq~ymo;xvB__T5O~Cnt zZSej%ulZ&LG|2m(FIsvkm@YGX9;@MG%3P5QbX%8oE_Q29z(cCrI|ynHdbPn#lPI$l zc2E!OeI;eD6u2k93p$Nyn(#$E|J8ZZ4r0i5X2IKsX!tfTsYbien@<>{j;4R+3h#dB%`lf&diP?=Btm8Z4*!C8K5E>gHc?}{yq%vF zUd~+7pgMU^74=)nFKSp*ih{EccDz?e{dW1MBaIcO`*1qw5`UVfGp#M0mJk(&j|%H% zMPo>F&RkUcn9;Nx{&P^0)nEE-(Mi5KGwH#0Sl+uya#jg@p}vyDSJs5ENN9-zH5v`o zKVKikMQGd?`;A@5v0(k#S`2Lhn!jyiEP}S_IZXlCsL1Vf*R(^r_ZZONYPy}aLAH0I z`3)LItNuKpqdy}APc(WjMYWDPJ`qg=&k!@D_K%LRd-vLNuN(Kj0Ar_U%ho1C$093? z+s<@{Pkn};;(K8c_Q&`qZ`Mxcu# z?JtOQjDSq@hb8w>L)h{MVoa3qr_w8>qRloC#=hqvBY}q+4-uH%Im(a?6 z)Zw|PrC?Tbl?eEv90A3C3SD{$_tNgh>smAP%cZPy$AQS5LkHqbCwVl2MMJcgw#Zxm z_gD8@@GX01yU|K%h;&(ai(5Z?mjoA0PEf@ z--N*i4;+b?sdPLF!@7?^YO3}4R4#EvDB%bOZg>y4tVn;?@7DF@xOU+R?2EVoOQEkI zEiyf#fi3k5nx4Q;-pZ@-H`@+6gCpsz=25a`)ufhXG&xG3H(;VoN~5RU<96QHclwT& zc&B?GpL-1jfx|&hRm#y+10?&eazT$dMRef>twUkx(drQYp1`t43#q|vD5K>XWBd+) zZOf2GY%gwO&|Sa@MCpwapbBsL@&n_mv}kT9=`0AuMNWHj1dA`lzPZ1mfi<}gIO&F- z0BFt*7MX3jNdGv*HY+Ip9RN0FCtu8b=s|9M5y;^ul=OM`NjH#24TJY*T4LIS^ek5z zg*85gY?UZV-F}+Aw!2Ir15VX)MXFyjQ?}nUIm*(wHw~s-Jo(>cYfcDm2c-q@LEn|4 zM?07HX31hmcP}T5gQ+f)()>cjBwIK!wys@`tBLP=JVe2U@K}zGI8)McJb_H$o%}rb z{n38cPx7u!X8;?I+ld8Ss|wiYJk4NE2|EUX;2Y@9cVQLq-Viz{?h|F2e181OVy?kO zQqBEG4aOpo$|<0jSCBhSv|{Z-!{?-54DtPn4KokjS^C84n9j+IM%5D2=pAH6vn&2XNNSmQA*fADN9rP%`3Ek0(%yY zrQhsT(s)vSJoaYj1;cWvh?~G#t&K}LEeCx$bL^`kM@bFTwl<40+$UX=(Zx6r;^NjjvaPfQN1ax(ZZJ(hf z!fi;Yj=Xuo6`eP7;tAgQi4-+2g)q-Vp&ET!2YAHs#t4JzLkvA(5~v10Fxm;_yvjDt zlTTb4W2O<<6aCM0fqL`Bp0Fc~fYM6E0vu}`M-+H8!4%0Xj`~@;N9=Po$-nbOJ!D_L 
zlRtsgNo9zmy*-JQ?VP1e*#0)+3MuBw8b~)M6m{~U-k4*rfWbqaT(o=x-g?f0T z+=I^12tM0b>j~-dL=oEog^MKsL_W|k8wcG#3E*{aMJJj7v3=%c(3hZ7-F}H6hGDXw zBtM;zF;RS7A|vMsT1(ZO>z0AfuW_Zu6febT&{T0mTLb9XSFK7j#qHH?+wItwitNLc z@TK@)>zp1hMy~)YrQ>GPe0A9OY5KsSM7wr4I6=sb3f9Jp^R^XgTTVOlO&Ckxc1&3fAQqS-x<6^c> zn_(G-+AfJ)_}NS0f<5*NkR>DHu^uEk+CG&w>c3|W`SQ@Qb^Th6Z8_n^@C>{-FwEj^ z_NLUr+IzGbDV^l&%Tp&Bwk4iPVw?PhBDWNr1<6$2Br36^w3gZcI}i}6-c&=Efc#8( zW+sgrIG=?z>GL{7_IPIyhmPC&wjB7`Wsl;K9tCpVB_VOXh0!TS(vAU*enhG0<+s2i z^t~6(+hA*1DQa*+cX$IJJ5|$G_RCqb0701sl)0J9Sce|sEGV zH7M(^g#N?sb)dwV9rxln+YBg>YQ64W-B}WU9%Ab$z=Q70cK9}pu|q!A_1`7s1_|fi z<#jR~j4v%EaxK?Aco8gprZ%f#V)#3Y;-V*x^>#WH%WHmphHx?1H~pD@jRN_F@2f&h zqpx*N<#DsmnK(SjiS9ijwa{O%9g+oIvK-oLAzw45!pn*s*_MBJm`xm0HCG5ar0EVrjiRWT&(=I zu~_mOM>zC%BKTly?P;EFhA9&C%W5r&hui6;i177rVt&vSV2CC7I?(jo5L@bOWDp?y zvz27za(7QM*y;UshigZ;vDb-w`2$L2X?bbbE#5~s1Sp3&%X##}Oq>aW!|tHihd}#} zAahaI-Y&s~57@>-0cCdVo&o=*R8R%Pj%q&uzW;I{vztnIu61IKt;8uRGKJH8}`K*f+}ABlnshz5dRJaVHcksO~T(lLOeS8 z)g5jF2EFR;Uw{UVrfoJq38b4C|Jm8_!*Ft=XZnSQQ>3t@;?kh}D5Bw>%eODlg@o+ zP;n7aB)*j+DzV#G4{Ruti+yX7(;rP@|N7HG#PsjHd6tLE)f#&hmqlKX1)^}^(u~?^qqMQL z@hYNa=QH@B__CvUaQMO8Wmfm#`LF{#{%y9dQK{*X*Pu+)9z&H-_m|78@Tfu)_S!~J zbw~H69BK#Bw+IQdG`iTvxzN9Iy)XXkMFZUn{v~TQIWA6L1>PGJ+vn~^DE6pb`Q|ZP zm9vi1eC(uM5k?sNs`G@F5q=l}hIhlMOY<5*JzY7m;m?6cb1Nq7hc!2raW^dVlOmu{ zYu=oNi6unr3I3dIrZ-HoKw`6nqwrbF;Q!_r`Cdo|196MhT% z%Bo!OPU@tb_MlQax5qyR^gPdV2rbxE%VT!Q7*PCq22rFS5}_vu_ZW&ayP&|}L1Kos zqz_X;tw9z>EPj{)rD4v4H=LIbC6Z$}lFKEg2`xkQgYtv$NuJTT1vi{_>hiR4nq+se zOQ=H(I>kx%B_y-?Ob6V4uP=5gJ$D>FSq5Tx;T1kSGzp2EGz)xQsrFkvPy2{t5{b-V zl`t(C{)lC9par`F25)R&+r&|~5^tM<+2zwZa`v%w%sH18ExL4&z#cmmj8&o63`4{XZB%O)P}?0F!s2+*TT`l$mlF9)izYez*?q z{0Q9}sUa+dimwHQVi-~AKcNaDyz(8va^Ij_?Pl{?EEU5*C40)2IoC7i?Hxfa3;$pU z-7y+oupZunpDZuf0%y1SQ|5vr%|Ni^cVE(>f^XGiiya#NZF31Ckjhf$pP_=`W~bC>`S&O)vE`8Vmx3 z?J)apzKRt4?O+JKZ;FR8<=3!;;2`-@DG$4! 
zf07Qd!KI{GVPC_wt$J4WY~9A@*DG~CRC3#tt>Btd$rT>hLhP3kqq>jxj-)ixbz0)# zpU|Ry%x(B^Nvk~zrzW%aUWU+cnD^bg&aqu4y95>|_4%8ibyM?gY1hGjOpWsh7pv6* zx;O|jL{TH?ZPxcCcI}s;HtBwYjnNw?+NUtHjXg2oW3h#btanEFI`kg#22f>J2)W?l zHx6F<%46}4MPi{ALyzZd{aiKeThF^-0Ydu}tLqr5E6cR)uF@+g;^dHVHu`dg{ka8? zk3S;z2V8@m@1>z`>?X?1eJWs^+cVOmC&V0tbHIX-r(-g$$H_3X8Qy)=ur6vIh)U=v z&DG0AjKF8~7i^(o3dYFgKvfNmr1+G41q5WIgb??Jw5|4!_y|0IziggJN$h)_mz8de z*O)9!)1W7KFIPSWCH;*x-SOYm?SB*v1~3tC@lx6yPJ~()*NH7wq}UlVQ;y4wtA~r% zLjf>_AiGL|-p5-yE>5Z3Jm z(NT5a6za82<7%Gll1zY$J%Tkn%cAg?|8DNTmfv{>L-j)#z~3FUC(g~e%i0c-GrUCk zIhei1+9Q3SNTq#NbSW`zBXqeMucv!Opt2^BjESR?dVavv51WUH!+y#9+QY%vMnv{f zM#a5%q3N;>g}&K8^I`0FAN2&(pVUaoj^Wn+ylRFEVNHm`|D)@xqoUlxy#Y}M0~tU% z2MJL?K)Q7ZQ9w#*kdW@~E-4YDOF#i>knTp1LAqNQx;wu;c3P?%yo*2CvB#o@zj%AYB84fk#dh{GrCXDwL~u5e%_R1z9m)@FU}vTi|5OC z1hJsW-_NHi?{_BBJ$$AH{}sTY5LGG@`(fET>P+wK!+&r1nYI9r+YlzlbuqS@A6u_l z(t|w+6FF)B0aXIG91SA5x-&x_nqdZRO{m+vR;wF}H82?cx%^^1Pp(9R?*zDtS+xSv z+=Ktdq5c7+e$$}07-JUu+!b;R-CLFmVApAZFd*daBCRsVB)`ZI@Or}}Vx!-IlS+ne zXEL)0z}}}C_xW`m_YMZy!qFo!1c{4eT3FH^G|kc{n9gC^!)LS!Sm*8!zv4da4LpUo zYc|LZ347G>Myx|VTip&o!<0Q}v?6TaMPq@}#w}TarKkx(1ht>=ZWTasMUT=c>De0TI**?AxEbnO>N*$1ATC5nL-7+A!Rw9oPETI)o!l_J4ljZ#!{4Brj(tdIj_>Dl zjqrEgOWhZ7-0(9nx?|Kb)>9Y-#bB*J@6dScDxw>d6JI4^G-jBc;!EXC%v~VgS@3( zM>=;JKFv~gs2Oi^7-n97@}lqLn2%sz?W*Ja`^Y z!}O#+omf_w3B-t;`Tdd@)0!!nTy?mM8CVDfX6F1hmck1g4de1LHY#&?LEOM)r{Cij^JG>y+1eD zv6!c=c?NCuLbTfZo?Ip~OvfE-T}l}br7^q8y_0HIM3F`2*;`$So|0-UV~bDU2*Mz; z-%j4#T{ALswkri)l*$*?FBaOr`zEqj|Hqw(>xcG~5b8BsVwJ9*!w&m!3o}rY!I>l? zc4@vs{61@TIn*Jj4so6KoXe;Ly5DC6Nr!jQHtZ798VkkJ*XegfZ!iw(J!t746mEF| zi@|n&F~I6;M$VY_E?xSKaOB_%Om0;BB8!0*O@d;qURZ*B)Yj?@Tx~I(pESD&et= z=OzRH`u9kFGIBYte5)JW-b60nGo)F={T(Hl%wXeK8j3~pCqipjmMSSG!)QF+cNl(erkE! 
zia@uZQ2vV)igp!yplM73+gZMagO)M$Kf5MfT#H5k#q|Zg57E)|MJ{?VcA_rD%9mI7 zqzI;UAX?ZP*X{a?lU0qIHH>YcoDU}OzQhDuyRk$a8`@!>A*s5{gQ$L%qxW2le^X*5 zhfeKku*)H4@c%2jR=N&2*6;13F+tnLVN`(y5xlEP@3;!hoG7>#E*5$(#@4bsf zL)u%EPujCQoZOmi)-4w@{6&2V%(D|D{eXeBT3faAf_!+NxbL$R=Gi$-t}+fd!)m}$ zU6;g!;m;7?3~N0)+wGy%Qjix7`$}yXbuJaO^*~fLpD6;OmH(CB<3?BIM!4`rh23%= zoqQ%#3nh(U7@L{ho4&l@?z3aMe^uds;K!7NN_^w#tN6qKb)QJsHErVGHQQ;wm3tXT zk#};Va@DUcKSv*wiKcy??PePlKVA=1e14&zrP=CeHjuw++_(Sls{wwPtwjvK&F5r5 z@gig9e@~T~yrKOjm>=;qZpi}Eb>(A_h&l!kw#4xF&HLiZ6armWRXgScDB^7FfmyUI zPiCQu4W0ZwkYnisic<>vI@J?r05^{St#-smKBec!fXb;m`)Q{c`u(Zj&5bjfi)C>( z)+Q?Zk+%nc03HSR8Sw)9C+HvIfd&KIk>6lVHaLm|x{;|WCGS`kH0s6}HqVTyLN5mE zk}H0hJLTpLW+^?;3c#zJ4&0{F;Q1AeNHm~Gspuu$n4sS&^^<=^R_ezdNgD9)LyTRX zlF6Z-#s*kfJ5PC{)OEW>D`fVhm-=m1BKSN*efNCiuYP^X8{^+3;=SS zs5xGh#r{K&64XsHEv=%*jWdog9?DLPJbx*Xi96qwj&}Ej*}zv_v22o-h}AyEVd+<+ zSW6e28po^5Y@8HXAsfd|htQ{s7AvR%Xz}C;_{0B!FrZmrs^+9sJB584i2>Z$2C(2P z(%s;{4FD?psJ~k$!~bT+FBGuZNM&w~C)n=9;20WF=hq(Oeu}I}E@!C$ER*!tB3JM|*+dx~taIrTm$1-AW8E0+Z2h3v{ z8bO`DDFtff`CvSCYQ;^>yYZLfmy0(r5oZ*S$j-=Qh}~ud7&Lc#j;&{>OHGE$gfEWL zE2gxaC{N2`4Tz7CM-;AWe8UV<(C;>+G7saUKz{6?W(b)Mo2o^ltco0PO5t zg4sKu){@WIsZ&7Y%Ga2B{#$wL*l63J!EA*Q`xHfa0VA@t3JIj< zs$K=^j3);n#{yEHK64Hy;TBH0kIVi35{!isn&U!PzZ{{O>jL~y-FbC;7k?Pkv^6^4uFP`WL^b-jc zA%Xe1Zd;3^x7~dvB6kY8M2|4dm;3vtY>sl=6D%gIBUD;K6ZAc_E9~LKt1bN3eaCMG z_t(j6GJXimDe#zW@@v&STx~44u4TcFo{V&>|M=4p)QCGZz#yqTX3Y5 zBYT@t?4HD6EdUvY+KMoCe||e3%3T)6VL(_fOC0V{2ie)teW9rJL2gS`I7LiMjOAYR zC4U81WS&Q-|FET{^l^LRA$rQ)x}+IZ9hUR4+xc;Nn}=Jw?qO~z-YLRwDQoT?t=+5> zZjymqfrd%^+@Je>h>id)J%{kV#mDYTai_)xvxg&KF9-Pa%_q0WllO8{w(O>?P--O< zf_)08pCL<3Z1&Fm_Q*Eo!eNOE9geTer6wDbLn}|RKCWuKzLXacAuE00nZ@)6Ke%ce zB_LpHNYFo>WaL0UNEuNZ^|(A=2X|DMMh}qU8fADc-d-%uC<&iTWkErOUiUHWC#)pO z3*tI$_tqvgfq8H!Q@g@_lbz#g9%XiJDQv<^`sg` zJ%T-vBY2=gh9L;MtHaVnv2Zkg>3g~81ZTfYXeTo*^s>R6vD)H1c|P5=;4{2K3S-tG zUcCHzyJr{QmE2YEEcU=vWqPyE9m{WCGBWik>{SD14P%dim%~SFK7=oY@z4V2RDw_r zJSz8I&{&u?Mar$f^SWFI%A1$#6d#)69^na6{S1Nr#PceV;&~|k*vGFRAwjn-tRWn- 
z$?IRDkJ-+x+jd*`Q3hh(LqcdQO4hwRhVpduUC&W}euarz&afm*V7EfM^9fo1!cCLG z+Q`zip&v|xOr7n^(qjf@1B3&%#*hEGEEf_RfJa4J(BTTDcdNF9_KEgLw! z>>JRrMmTx|@T*U;C~QCQx4Qs-i_7(B>*`JcD=m2s9qxEQBz$?ctEja61jhaZ_w2!c zUptZeOQ83&AkuVvo52Oul9Vd}WQ90W!QU5#L@FS^Sys*`Y>31lEP5VkqToLdyaWG)gsFv=?e2eX^#`>9L183vAq>z zX4&PaZTE~E(V(qmSdR0*Rp3=QIEOb=AxJKXJDUP{%ZiwB9R9VL{DmbU5c+^xjStKi zr~(mxmO&;lwwNXL&8+D#NVbU@%zodA1&YW8-Q->KsEI-ir-e^dMqSl$3IV8Jkh+G6 zJIhNs3vQ$`5Zn+|n_C_pI@Ua{|n!g39$gh znLxrqY4vLQm(JqGPlrCkX#ei{tB-8`9*sow$8F~sFEH#-VxF|U1;zFFANlFlUQsE4 zJ^@IWgTw&P8q?@&%2*t)2C_+A zt%6%u@3Ru7<&7qk_yp{lFbaCZrD!$t=PF6xvRrXLK(vGwc{jzMpQHmCe-w;_j&yK) zbG6hWn4RJ?AdtfzIgn!A`M@5Gu8zt3C{Sa3G8}9+Vm3Xxqwg{9G+bUBn((@(Sxzs% zt6w}!g@QzQ0#!;$xNSQ&Cf!Tn{b>CBe{`?l6J3NifIX}Wn!-A(vQq%m65qNtj2Z$f zBe<_vz1>DOIl6Of7@`ZrZxQaGC|u~=uKu0^U$AtZadNkDQ}LKTYhcS$@42hH6^?(N zl=~8}vt~dQ*lKs=e6;RyR{Ev3td&r0WFT81xL|kq)Q&JWvYk$_e)($xsmgCxlKOn_ z;Z`RciAcp{{N&s^P@-7mm5W3nft*Co28pm6zR|_tOuk=W$vU zsnP6m2Dre78wLZ;&Fl2W0m!3AH_Iqo$rm`o@^e99)1Ao6p-&}-848k0;(+tJ{(o&~ zyVar65(Hn@WW`^~1F+fQj)>3v%Ndbt0A+ea<)bOsnNp`=dN{o|xxzYiIifObY80A4 z^eLgJpi3ku9n=f2Oyn{?7eO zw{L1Uc3L33uo}pH=Z0$`$xEbKMD;;ZV!3NK+>Xgs%$8?k(0r!+sLKLl6y?FlKb*_(NL<>~(+BG1IprJI5rrcp`J9kW1B@T7|A@?Q$u)Klj*{&O|_R zI)8)>e2esCxm`oG_TqdoNy*y-i$bvp!E{jqJ97VmGu1VPO|KqPFb}|+BA`YB$j#z2!6?sDz|8BOEv~aZo^*$SC%Vl@=%|P|~J6 z@R;=r7<=;1oW}5~kyRG2JxDnD=f5%MJ--#S<;3N;lK*er|EFmBEr^aOCjSG)B^0%J zLk_!G>y<&Jl9akZ`?YpPRsJ`VH*=FpahBGlGdnuRBlt3JNSKQWbKBxmZE6MV?+xx^9~a z0h>ag=`Kfj_x=jHV9CDSv;;WOVda9^z+Fb@5|30NPOTX9XS04_)K}IYu*+=zUXw=T z4^vOt7mvq99=nM0JQPhE*2eo8hKu1;;%tg%&#BI4Pqu7=I$c2-C!Q(`k`Q&>qCY9druG73$*5WRWOOl~lH4&h z=*wJx+DwHOzisxnZ^C9+dd9r@PKJUIRTdQEi}<$pf7+d?EMjKmT5bQrb@aX z-YH$e_&c>BM%HJ;Jpb$30LO#1FB%3BvZ@|v=89Z=sROno;G7uj_4q+4MGs}>uLt$z zFubVL{D@0RApYVThq23)QG*1lM0QCAdtejpQRzp8APb*^FIJnGmsdiCd(HNmG|r_f>#=UpUrU~zRS;uXF$D%Uvs zqEsN)C{tx=V=^QKJF7yRdq3zmeHrsGSLsluOuA>lNvOc2-nRBXyHeyW-po%O@*prn z)fku#nSY8JD_)oNW>Rm7a&9*UI=^6p*LGWP4b6c1L1;j_qdsy+Ee1J_Z#kKy} z03ckQ01ErqNxNYU-ceyYsXxM&eM}sl88jriG@oBm&nlZ=S$d_ 
z^g8?OuMd9-FX=%*(QRX40RN10*iGV`B=f|2>`-5*k3$YX-`IOrU5{ z8v^bB&YfrJJ!%;6!b_y^Wf7Zh+wXPafh+BrBnaKOLt3$S07C0zB=*0Rcw&QBdgezr z@-wGVUrS(M4bVRBIuWl_UoC~mZP4tGKqbiEW+vIIhCJM@UOKDnT0beNE3CScqdaUA zKejkN@N*M=;ngP6{mBpdJB~HVAr{m10jPUbX3ui2C1@z;4eOOIgut2o-)0!s>x>Qv zNIj4;f|pe2eafIXnc|K7i)Oo!NCl1&7Q-b3|0V$;G*Ap&{g2d{5+7h>Y>WXWd>zQA zdK4p#0Tl`=3+gV+gJ}|qMt-y3Bat@iGNWtC zjKAn92rb1PzEkvt1F6z{Fs__1_+X`kxy>Y>Nc-RMjDZjyrSo8;I}a;}ULZvv3Gv25rUB=O4hJGa;dX)$RtWXu|B8 zn|~^l-@@qdBb5A$BO1XoA96VWiSkB;DmS18w52afQWB~Z&(-_jK!6>9^>cMq{KCqY01p_=JiAkr(5pmKYH&}Vx1 z2exX*1X@qw9B|R{hyBhiL3GYXw5*SpS|?`=ri!)~YT zV2@;7yE1di%_f4FA#t6~Ra(V$F+c1cl zGlsW-;(F9cFq$Xn(q@@y)$YjAWXky(|58O;*?t%hbZm7E7msCi563Cg|9c36D2J-@ z{%?$!7{EhOlvg&^DRj2g{FFGX?TV6pK>Mr0O4q^lE5cWnq|aI*0;r2P29G&^p=uRS z$Cv>SAn48I8z z5+5REg1LgBVIkAVL>rk((e2?KC3}SEeBMI6@tAwNsj1n8M&?9Ft-xJHveE^o15W$D z7vAeBZK_N4(}dm~-eho*lZ-wG5ld=9=!OHRW8;G_8thCIi$V{)=mo?--4 zmr&~cUVy!sbupLQO8fiu7Yj&8eE~p`Tj)tmAzAM9t+u*5m+`EtDFq@&5fFw< z_hrU|Ov9$HTJBsVEfukn)d-C^T=LC7%;`un%$*hLqc>^g$MC(bGo7iY`^?uM>H7H1 z9`vV&@9#Yv(k6I^XU?#(TIaUNJ4$}a=cE7r{d-d|Akn0T!F!eUjCeNT_+a%xuYt@~ zDLfosgKXceX~cCDOfwz_CHtGRat{lv>G{U?i*xde+t}{*n8J6x!{{zlwY1c|gdhF3 zsF?NVvY4vjOuWH}Z-2j|6bamVW;F)YcB}7(HC@comRUQHuk+5v^99#!a{5!>+X=Wn zI~_*Y-Lg(RX3wJISRAR&JS=@Z$*)q9&i6HMd@M_vOjYU8)$4V&!$=swvk9Sn<6}gP z8o`?b!jnVExLNL$=18HQUxyY32<0ofhQreU#1VC86u!>M_rMb>@zw~97N0>$#X;)N z2tcJ4TVD7+@a1(s;y;QVc4RqnO|x!9;@04v;sVuh(ItN`s2$Hs8~Je6-JExfWJ~{_rfB4zw zc5)?rEe|9nc?Q8g%;YhF^Q8L}unMqTlHd|_`)qC&E`>GsfwJ_yFu%9HkGFe8ot}$^+OTz} zCI7NX7O<+km5C&m|Gi^f_Opi^>kl4megr2jAQb)_J69*}5CnIR4OjF$wa-c%TC%D?0_c%HeEEjjM<4C+^wP2F~Oj5ECeG_^jWt?AR5Xd;Oc{|E(JlEYgi6tysazC^4sGVRZPjeXD2y&Nt0yj}r>nd+(!LC;$B{F*;e z@b>!YVPP75=ul-a%Jd}&42ugZOMZ6#UvK_<>0VvEed3-R;Xhs-0m|u1-km@1&aq@F z(02|2mw*dE4|a-9S8NDD;nD+h<@}F89G6G8KHPMb!bI(~mTaW{Fqf5)SKO0eUrU&$ z^Q+L70P^98TqI>r5#=}j{}xcF?+eW_CCk-8P~IuP9ES5?8dpE|x;Tp7Kvh)MF_k3^ zU)dfuM?g~Utr;u+T_%8TTViJITDxk_x`|44*nZxm<{e*p32f>%)+1|Gss29X(yKvc z)FLy+U;_G!b-p7JC?Y;}ZJYf1^BEQr&?1rn#_$~w3u&%Czm`y!6*W8+I9AHQ#a$iZ 
z|F0zF&ozB@ju=r}ZqUr<+KsCY*_S)Bkp8PJ`=Em2DGPxne+(SMdcfxT!*t$Q`maxH z?J0s{C27Y%DvM1j_loz`wTgo-L(37o%z!s^W1<~ECGDI)HsQiQG;>~k_Ra&< z#F45N%MpLCO`YJSYGLxsh|fZk-HA1rf@cUXiB^;ysA77Zt1M7wmxSLY>APMPOZD*l z>Eqi7wTh`NleaR-=rys;VCV5a;P6;?&I#9*RFvIn0y3fTorSL8l%0jSnOq9)D*7|^fqH=0 zI-D*hSgZj6rwk-HIBQIOt9t#{67PI$00@xLS;{$Z?Cus{u6#$WaYitj3j@l~BM1tP zW#gy&gp8n-xz1Na8vV~Zbb(tj(-EF`JVZ8P&p%wGan~?PnBtA>Fm<6`8#$RvFSUu2 z6oW~P)@kAw4q)&z8T_VXbvXnm70we?MMXW}owe(^eqHL4yO#|-g-jQUx4ew`^!VIv z*B~B%O41Cz<0g{&;-IQN73@KOe9@5_?Lgf3+RGHzTzM}+sW2GaPMF(VALbZ{i)4Nj z@-cZA98${Z$Aye{>OnMcF5Tqtv?9o_C>?-S=4?rE}k1^^WskK1(PLO7`J2VL#*ZI7K#vy9tJbAKozWN4i$_x%b3# zb^)MH7d(b8l#k^LqU7e^=htVb;b`%f)B5AVH(K|`em$W??R;eh3|Apo(_LUCPg?11 zT?hyEq8)A|Y0*EAEDTz)6f2vn(nUJ-RfA&uoH}TWmrF3B+6Qvk#uhUU64Z0wo|@%K z_vG`sxVwTV5mUg(aoMjakLyKB4+Xx`ZwN|zfiE^C4@^f)NC3ixebst&b+Gr3}fCCE%qr zr6mtV#J5z^sR(}*E5IrU#w(RMkJ|0s2ud6A>7jF`Fd!#?+k_$kj_kdAp!+E2!w4|U znSR3Z{ zh$yI85a2$3Y}BDJT~q~syOb2~&y|AekXtZWcCA9YIV(4Ej#?1qL7$hL&&`!vR_SMhk$$^ko=0bI$V$f{D+k_^S_S3 zA*qAs2+$L=9>K%!VDN(X@i{+&g~YS-QqO*jM=bEKk+OR(Eg89I9}#8~*% zeFZAVsn?HGjl08ED)`vzUK9}vTg|}%M;0S^x>mAr_WR{Hz*-GKH?zc-BA2}Eo+F4_ zAW0%G0N$XWUtRDqb0|Pg^0Kk`J}4{*{yG2_OqDNBhU?Y2;x_9}^AkhWn^976uaDTK zz|?cq*a0bnpA@DJ@bOXIin;l(?o4C_Tit2Lpvm@xxWx&>)TO`+fwJa|%Q5xKs_jMN2b@nY#iQn?W14Q7L z3s8Spcc~T-$zWMTN3qUh02NyYR@NS$sc*qo|0gf%&1iXuCuH;U))zXkQvYT1MW*m7 z76*cc3?q%pTFb~rge1%p%jaLUN8-=a9HnScnJ`yVt=!aiNw)_PKK8#vu|~lm_QmLr zc1u}W8_X}_0?G~4d%6aM(fIhNl?)W49zV1%;>&3QO-w_6^@cI<%mf2$MmpqA34wtL z#?G37Ng;WHfJ>i)VHAOeH{9=q-@pAI^*-iCt^2q0^KzA7aLU$6yzrG3oSn#F0}E78 zv-2K*_VV5AU)V89V!%@S`#O=jwnnx6J?Q`U8AO2F)f52Z2Oy9jFyiV+5bsC_WcKB4UkMMco*O` zCb*CKq_|#a;5Vz{=mVIgF88->(%(lGxQYTBq><9as{2f8{n4Cj4f(*&=gXOygKaVJ zG5?}p=uoXoflsA5X*{EL6L`T&P}Btoz%;u<#`V(0#rUd9vrcLNLxN28uzA5!=K(CZ za!O-g7MvaJD3GBDTh5^qJ|ld29FDSM08l2AU85iv^_Ws$VdGwxl{!IL_g)2LS$zot z@8J_KrT;+gxStxB1sT-3`97KUcL%Sf&XY>izFgRe6@fu7LUmcnQP|DA+qlYjj%+C? 
zDf_sZK*n@43t;I%!L*6;ol2?kf8*K3M1@sU1JGt2JQJm8pP%z@f}LK@_>-9z0^VT| z<*=*_S0?-L0d*TxyE9tzp(e#M*EM9K(5T^yGrv9 zUtxfYtk9zVeCQn_Q6-gjpz!L6m|}u|psQw9`Uy7Mm@Q;SGH{)m17lsR8L)IN8*19o)BLKmF(Bi$&PW1;*+Ij#yMYQ}_!XebS_cZHLLkc@ z0kdgrYRXrV|Jv&r$!LdRm5y<$>dwA^-glbj%#L{xx|1>VZ7#^Kn+N$ym2zFA%>IMN;}+hEedVg3vlsR1pRc-0})45F%F& zdgf8E?c*ZqzNANhi+zx5%#fD53|t|C6w(!Z^+$8Jh|IzjuE*VZoK|3EIwR1NIsMMxWFSit!cY*KCt~_cS1UjwfB;2#!$Kuz&JGb4e29X8Tw9u zx!)>H7*!IX&?jAD0!37qLM$s#Jk+*T4v%sTL3YmpWlFp(3l&Pgxia4#?jS+Sz7tJUC1Z zfi$ulg|v4FZ3D!9{39JO=6{i5p~dWFlDh+S2gZ1Wlg>nil^m-A@72)?01o0S6~-x5 zmgJFloctX)CDZpep4p8mFSz} zT@ziiHNR_St!ZC!dO|minXB!X_SYQK)NxtCX^Q+_#5?PYzXtrZcKf@2_s}Umv$}He z8OVIRnegOSf2M3X*a5$ibSa&lz6>CWcHL{|;;pY^l`F=s?yux2$4OIyS^VF~ZAM1M*qxIsWV; zSrEwRJFKT`X0?a5yi^a$4&{h*xi)<4mn1h)vo`e=W?~^>+#MOMNghIE9pBJdg*b43 zf&iV$*D)Y$Z@n|(6ZN-A_tSN8!eAgLY8+(!>ty!Y5uX8xICh6~JYRm`;WzLSOFRWS zE%Kt)O?ddZ$ypm?^ZCoIfMGE3EopZ6h>Sr=^Tj*$)z!N(+XY}gm2>n`K$Ve?bxrb; z!xk?kmd6>VfP}_H>5Oi24hR53Q1xbTvSQGo0zvlWUIUD$OG;+=pS>L@@*Z+~^)iWB z<$Is;P@ZY~p>skcUB0+nQ#J#lRW?HqWSdJcd$O|UQ@J~ zoz2?AG7yh_mzCVmx)HYn3`_m~`VH5(DaWCEn@Ygp z4UcwCq4W-%9RMkef{71=ht{8nz>9*!JW8Y+?*T)5KE6ABfDC+cCP3YAe(UnIABuem z{*21gS8%nEQ&?EaoZ}<#^yS=f?TA>^mo`i)Bo?@Zc~a-})-DeUtS;MPkND4?eP!Ip z^qJ!ePvA5u8`_0T2L8~yH%|%uJR016!KUJ9`AFuzWMt)gci&daw&tSeJjC>Po!R&d zJ$kQ8-bp*uD8Nu8gbn-4`y0FJsOX1HNJwZs{yBXD$)R5j;l(Uz%MXAC6Y$euQrvYt zzC1$;AAr-Bmu9Nu=H(Pn7HgDo5%0!dQn*i~ie5VuWIPf8WP%bn&?=ZORg0&$bh~}E zkkyJ?yc?{&&TvH?R+p0pbd;Ie;Ms~y9-N-SO`=J z)Y1G2OA;_z;;rXp%+j*$6*~ZW%_@Q^NF-1U*dow_V3j_RL58D1OX+gF5Hs0Ekwyl+ zNMxlVm|1GCzdW}fiX$%r1aF_%r8!_&H|SPG1r|frrrl4wdC{m1+wVm_U-%I#B_+|b zI04RQBMD$`QiZO5VIBj=K~K<^z)TPis+rD{z4SbwM;VV~d7HO@9Lo!25M_M-I_rd;qT22)U4a7L1BVzmU5!xl?HE)` ztV+QzPy^TQd)CjICq0#088fnaR>OpI{4%$5vAJk#i!`BmK=)^8+DFIUHt|jA;jl!f zU;m110#|n}l_g?J+#XVlRv1U5FAy~5wvwGWLE0+k**%A9^FW?YmAD-7M`pBbDjx=; zi65`M{KOoEh&rS^70|OMN$|d)W2$@un8Jv$C4r&Y_gG;`u=)O;=cQgo*Zs}S?&>9Wys2yc+#$!d*^1wThZa*+5s9h zZj8oUbRid!$H8bgxIx8td^zq?$76Be-LDEOo#{Fn#y@{&HYa({J#^KlXFI8v>Jy>u 
zVrt0cxN!cPf@HPfx-jiyIP3zPB%2$^^Y}aYJ@B6GNlXb}%gQa%!%gT$>vDpJh3_;i z>LVSh7P=>Q8R)M2A$@v2X0;__zywsz{;$ih^0V;hTFS+DV@{^A=R{?a&u)EVABNyA zcm|4nCrA*>E<=T4MQjIdm#T4>T+>uh$hbe}L&X)+Z-&Bl@JXR>kNdj(G3B_3v|O#4 zY$pRd0bTv1Pm#9CFZ50f5K{g)FZ61sztKwsAO64>Da{2}LHM*u*2<^_ItPYAK1GR1 z`U=B%<|r}f5|9vo)5lwW3B`M?hL9A~7HVIA z3bYaGrqAfT91-}f4>A+y`ivM}&`Nch$@j)YwFyqlYB)>|mR`@%GngrAyj~AzXW$&z zsF)9_Y-iKio@nfk ztoRy0J2BtpkKcwbD^MivR--vKrt@q8V0w0TMKX@zD1OSEIqF@mMA-G2STyoG-fhZO zu!bTZMus#-+_oTP7++GyUd8-d{5x#pJy@FtLT?Izmn(zEI*F}lXePw48+pe~PvK4( z&pk&dWp7}p*h%nqGEG0)?h4XB9Y?uDRQK7(EZ#ORD0Zt$;&Bt#l%wvUCVeaas&CXe zsymz~1xnGc7NB5A&B2d`s1mOQV%#$K?0>`VGV{xGrk=$fu*r5D}bbOX%voyVnCBm zC$y79+tE^#U`OPKQwm>Ssx(6zshDx?RkulSI%*mm61AfPx8$zpn$@ZOqHkMuz5Kck zUo7F-Qhls&ZTcAHSS?Lt!{Ze?syhC;rNU&$fe;j)?l}(Ya3J_M<>%k53$G`*#8!v4 z38!7wDc|h62JWf!-L1NlDMox@{V+iO4r<%NYlj$XjjPJ3MhzYutF!! z3ei13gebq1+D@f`;Rl6OlnuUC%NrE*ea(6RrpAs0Il*&vHGLOqT&U>6+WB;-p&c1K zw@JyU;kaAZ6Ne?mo{u@VylIibY*(|W9mLHKdK_kKdgGS^*L9SrfoOq>E6p7YG+$%C zZt3s(%T?9pM0C&T7w&&K3BR2m5hM4V5MFww#(=`!hIi*v*V*ttviTv9yctZP(Mq zL#>SlM`ym?h1YRl5o7R%-GYyAUF^bYsA$Pl^;oH$g%}+prM(I2dS7Y&tpzx7$G_En zZHRXtZ?FwXioqX7o9hzt@;WfnCD=sMR9~M*^7+~zY}${h1ug?mYBH@JHK_RVsaWuk z6xtt+li_gg5nId^&xFK;Q<5eBD@|+k37L#@C@=qPDK@D&BKm^ZI^M8qgP+gtOvG6CL@kx3 zijZt1q+F(^_6Sgq(Bqx@EL4=;H-N~m&Of0jvn)#olM8_@N$@rUB2}ysameOKakx&d zg!`nYTbSr#d>2Qvxt4L727mhwLUwBmu3u@fP*;&*H$VKEeX(%nB7VL_S*?*u^Oi7@ zLLBeYLLHe9sGN(i-xdk=3pi*GRCNX~k0BU$DY>39*VC*REWxS=Y zN?JNJ*Jl(>281Xw)X==59qRNO!3O5-5Aoe|BD#vcY%dU4YMHpI*E@iv+$?fZnf;7WSiUuH~Cp-GkS6^ zK1^)n!);O-LU=!IG)Jc9z&nMYKwF;Ufv_4veSxV63>+Tenct0H2~5Jed8f2^!XMWI zCt-<=F@*1a;YKX~SX?vST|0@H3ov7-D>Ce z?XKrmRw%?q2q!d|W;9}mn-0*{G>38dAV>#-AIrjTn)QX-v9TL&Ir)}I6iX)BWor$i z2p%pk)9LYhTpFP)3YeCQzcpE8Yz+tMXxwMdoI#Ww%D**VJk`PejGEECPv_O1G(C<& zKG@G%vW}Sg(3)d&krZ!2F~&o;+nN#@vnE}0#0|BK`}aHFiMY%n@db(KOuUJDU+{f< zz>$+tsEYmWF615I4BPalA5H$d!iSe~;c>u!plotKGF|SzE4@+9l@0*~{sC4>WF`*F zy_jU0)gI=0_+mKZCShUI{SWJ*s%gKVvq5Js1GZbEA%hv72Ex|Q8r<2+V`Z&^$sA&J~!+V4{rdu-v4r1dh)B&fwr7pg4ZJ0OO? 
zP8P=!(L(p313>~HAPSQ#5l%`Y?sDs9lH(#RpYGNKpcs=nZr$Z#$0w27cRa8$gYE9O zKbPov_kaW|i^wWk@q*aE5dRB;cbDfeZc-1i7BFj;7rp0^sjozkHL=N!fCQk2?l=sX zEw2;r;fg@}poh@k=c6k>sUqh0uC-&P)j>_%zTZ067JM8IkM5V~#N>8-p{AC5&jM~= z!Ub?hnPFC6os$p|3ITer4iDd1lq8X-PoN_S;>Hzkg~zyjzSxRnoNs*YWk|Dly@NRF$5adFyQW!Wa`{YlfQynWKl#|JT_GFI(pbbqt|0_q{;;Fv|4+-6sIMZxWYa^ z+`(|&Y&PPRDdP{?8ja0vGE{)MzWtdl{!b3*hjz?l4@ZCLQ|pL(X0Li>s&%>qD1)jf zKZ%ra8$)Y(ED|82w!}>gPao-!Kd6O%hQ4HjbIA+h)4jO)3}y|DsXflJ&1aAzeAe*$ z%)WCzTgH~ZiL7_3kw2GM+?Q=5!>7XsoiA*t!N*vxvU=RGbPeP!Sm`!GToG+wDPc*_tVohHS{vy<(%5MGil8PAZ{ z{vG@&7x!+PWPX`zxq^#m5&`fS)8yw(u{U~oSAPcPwvvdCgje9k`2tPNScHIuZpOvkb z|BDJ4oyrn>B4+svO(uY5;FzPq?OdoeIs9?!DS2<)Znn=NZE27mof&7uGNc?nc2a^- z+YwAHfyp|@7cm`Z4;g`vX@ZH*EJ{2`N}Y?d)h)-M&aZa?Li2@2_3Ice<1%d%PeM4D z!Qzm{d@lC8y~s1m(HAZsPx}tfO%@DprPt?f;+C73U7Clx?f3R%nBRRr$;1DlD$&rY zuehD}y=RARek-mth&=HjA=7Po3ybAS3AE#4XjuV80GEi65i*4o z={?(O$J^pFEBd?|>SSQn@G<1(R9Y@`Pdu;4cM03W#r`aOI`Ira1Z`jg516Y;069pg z;wh(~wG)7G7@at+rFW-?VR;eDC9Oh3Ua$jhN6!m=^BSgEesd9e<5WF&;EsfV0ghv+ zoy4BNrpi<%JfPXj>FIq29ZH(SeX*oOn`3}bRj1=={~XOFClR$3HlY%wVz@SyGSF%# z&=!Yw3n6S5Q6((r?8LS9!`CzYhTB1#K{FU3o}`O?-nxx7^+g{IQld)%(11>>SS~Zw zK2!Yci;!1tfYI_3MI94dH_+>Z zx6bdipJ$2R7no-K5l#PpkYfcxZ2fg-dk#68>1jz$c3minz$WzSyeK?s3Nnml0cNO> zcWFkr@os#MFh%ILB^pG$;ne-Z4YJSotnjUp0u3>Z<=>qS48b2xgP9dFk9UoRd5K78 zqn(}BLI)N>vXLUKF}G=80K~t>e{O9gdS&oghH=crptJp#*zDelHbtpF3LtqN4~8REq2 zMVcXI@vWIaQMgTFX)E8`XZ{C|)9x#K>;>3`%(gTL-~me_4O7jN=20JrjaC6`5AOjF zE^aA>zChq;;Fca1b?`MDbUKpFdwn_mLNfSMxH)m$i4S=&(AtFJsYH_@&OGx2PZu#e zVHLaX^N#3=k11WZL=UUN`7$Y}1Q9hQH)fyd@tJ4?4@NNuf~VQ6220^G0GVPnn~G=a znL4fax`8HOT2FY(zxi(V`j4_AFkkubnK*guawSAeEq2%Yvdi zaXSublirD9%8PnlX6ubS4}kQz-9NlD!M}4^^W7y@F+{=+rHm4RVYrsR?eW?ylapRv zO8+Qd`52`t_)nVI2Yrl!@UvF>P%Z7jV5TvZVJY999b3JOV^g3e5GQ|Im0^>{fdP9? 
z3vaX$Cu^c^vJIycOx#N72+TzWgx2a6U@5pUFg5e`Ck{))`A9GVG3n>XJ`>H7zs}&7 zu+{4?eB`fyuG@6%mF4O&YG_HuR{kz8D*WF;aX{{&y(J3hlzwphDJ&#>duoOuNT7_Zite}B||KVpTa5b zLZ>oH0~kzVE^w6)d<-8G_kr#yVH|Qv97dhdn`w5^ATXmS^7&15!ygIgD7A>O6S?CLJfgw- zO%ilptWi1DrSvGG|IZh&%d`n_bP1hW)+yy^HX@e$nYwD_cH^i&m_d^vvMu^lD#<}? z?Pk}u?^?0ORZy1Kee5iu2^W?G&rD9#7a+nQO(l3nysvs(a9hsq$W-Z=PG~>7j6AN zzTPq{s{MN(7DOFXlmV5l83mJ)7#d+HQ4kZ95GiSp8ajnRS`He?H3D5aG-}79TFMNSAv-c;~y4M{JkI_nGN!FQ{yech+fVxuKmKvJnH%z9oGxaeV*j z&67QiDR@N+Mw~@7K0bZf2c=Sfrys@;HFsq$Y05q03Yowggzk?WdvHBq7N}H)3OBTZ zDWyG!oJ`B_*H7y24I7h;3+r4ZVjozUomT0|J-x@kn7Na6)Wjht)wd;8TiVW=Wb!kx ziHsARlx0|iW=LKOw=ygwjomvt!8$E&xV(1trn8EQ%Gvj~nEv7<2$Cs@O!I}SaJ24B zqWdi$Vb4*@H3z5pVk2~`$p@TjRw!dbPBSIPIL-)#KldGu(ZyVgK@GeU1%hb!YtHu} zmWR+TkZ7&K1rVzRJO^X8KPivM_6{R2*l1$hc(^FaQ5M^O2jdi$7_ZK!e3RG13TVPL zUkUtGn;6L_3y{y$jbP$-CrP}&R3YYM^=n~zJ|z(bG2``N976?^4T_^HT^}N@R`K-tNOjp~a4$$) z>Z9=?A|xlsbb9)n=Y3<(DYpeaDgKqONR3`9l=!H;1Hhc)-v;GF=5q5jtqKjmgWcJ? zMdHsHB&E9t$6Tqg)pTL2lv8Uf#;GCl9?d3!zhsJvwJh>xD>&CYf?}r~1qNxApK>AH z1C$g!ZF;d;yTPBrJnAq(ofx>^ zusreoA8YBL+n$xXyW{z7j^`)*y=EKZeXPjDVNQLc6JOtI1D-PZ{s!QYf96I%j?^z% zMfdsT@Q^jPe~S=kAaU5qHum~`_8>VH%(TAQ$-fhWDc8 zGq0Hfs*B8)M&;MR4kpa-1@0{0!|x@K5O?qGWW0I{Y+u9*UV{Y&d-Ok_CWo_$+@52c zMQ~z(PwkNifwqm?@=SAdsRP2sZRfMY4ppxvBWC!~o|mB2!%|$uIGwz5_L-X-o^avN zU+*5&GraB%;;VHsWhtwZEcvNBJN69lwl&BgqwRrdch+Y}c<(my z_BR}m5ZfRxbmS0EOtP>pZUTnpGbHjVLF`vv4Afg0)Fup#+Nm3NWhQ;Qy9{LUibB3e zYv8fp$!z}v-CpUG3T7sEhbsb@XaePjUc84u8Tyq(oeKXm;uU8ZbO!t85poO=QB;wa z)cE+>XWwS=4Kl_<-dJlk{RZrfef@?O!k9_?F>qvf{IupI25~o%`s;kVW|1_a#q~_0yo8hZwr3bT==(?vVfn!Gvy=KLn7)DNoo8w%tvdj}*M?)z=9X3Y)fV~g`&yKdmQn0A zA{vMfLjL-~*IcSgIbhGP}`BT1d)v)mmd=?j{f@aDQtG{F_2vhND=>Ll2$;Ew$#ie?6W1Lp}@BYd!|5 z&)*-Pdg|V-n=1&F5I2O{09wsQV3$4?R}TPUqa+xFv3@SBOW;;=kr2p{~aAF3f@IBM7#)W5k4NS4b)CRQr>)JFl2Wgs8w zA>K%)kEYO-EJ!9@{out`Fm;I?asUeb0I%>@{y^P{j)h`72mlK?xbs^NV#zSU*!T)+bJJi zYkdDH(5u)w7~il-MkwRBFlA4s&#Y*(cRO||Q!LSP49J+ny zz-m9tuLW$X#Hj|NmSH-=dI96~=JNBN5H7)9>rm=p^!sL0@JD*jaI~Ux6n{l0p^Kf$ 
z<-^CUba>aiKYMXN_fWsK;k^CJz6WCVz!D(JG_z5b=N{L+ zd5{;W!HuV$FW5(jpBJP%h4XNK_=wkm%>?3QF~iP&7co&Na3I@@$Ub;0IF)v;-&%g5 z6KGj)#TazSP3r$=6MiEv?k48Drh;ot$4|RI%VYbZxioTo0{Q5cUK8vG=04K9m#*u} z7WU}JHOL-m4orrKHznd%Vlex9w2lp>zL0}H*d__o(YW_48No7u}qNc(Kn5;tf1qJ?*^`0f$Rp3Y6`%I z`o^e+VPJFhDhd5OCOA8uj<9Lpl#J$!MVx@6HvRSu#Ig-JBFzeyz(3U~mqC$z{dO2n zGglO|1*WRp60J9`^ZrvQ8)+h|%SJr*@`uRtg1Eb)u+`rjZ=J-X?njyo%0DY0-~vFC zp{1<;Fo@#i2nIsYCS46$^>XhSi3gCS=c+i4M%(3awIwL(>0JJk>*h)(ucqilmGlP> z5QY!}voM6Xp(pd|Bq^Y3us2qU4FTsoDCaa3q2IpIca(RMwH|Tn-1Tez6k7x#zKx$Cpe-LLLl3lOxmICT>AyIH@jIBM3F@ zg?XEt1O$zIPnYbC?H@#W_#f8XmGwymQr(Yt1U-gX=pT)RB}#Xpk5ftF)@`!O7)c|2 ze6zK#SJLf|FRdQMZ_{1W&&hbvY#3#Oe3S0|3?|o{*hTx@DYFXalwyH9nI|hq43>K* zCmJhk^IjyV0Ip-%C-z?Xh&xQcuc#YJx&li3@rKk}N_c@Yg_ND*5pkd8NWWN{#A+!DEN5Ff~Gs zOyU&ud=_W&vpe-8Kj%r63s=&L0@LT~dGrRPGYww<66#!XFDkv%o5*|g;0VHW^s^Yn zk+@8>r)ZtUG<~6-7j@ZO9ZJV3%thlw6O(W7wpm_PQ3>HU@9P1pp+){0w9HkBsu?#O zM-aF7Y<@XyrcV;LDilL9N;>h=#D0R^Bc`$ssRzpEfu7ani|n!@D~B9J*Z_p#GtVz6 z>~jhuiBb{bD*PVsxnxuwwA#Jv0S{^~svWj(_%C2CGv!2Qa8ZAkY~-zs1@n%x6c^sr zsAGfc0`3+KWF(d35~DQE{=q0)l^}(rkW^qF30yE3d;;uHNp0$?kwOja(AF_tg*`k+ zRd@kDMwcN9KI$BWMsXn3W*!M>Sw{wcisONjPwxBT{Hp@FgXFK^`8@cg&UsfU+-Ag7 z@iesxnsj`Rj#NsR<`fHEv7ujJZFL_p zFJ#_3EM+m0KeVGWdu6~Rd0S%p`GxBWk>?wwx~_%E$7N!vrbFJy9E*#iio33GIr7Qr zGsl;jITfG96=vQD`5s72-Wqkb9@)w1jo9e1jExYv7&|-3KT=;fD%2n}Yc*pNJG!93 z-ND*|UB|4J@jgT~pmJzE*pKW}*Gcb73%hc!Z5+uZy}S=GPO!q*nEVh*DSc68TYhsS zaAA)X)Uq!{mf?P}FfQ93wvReWBpSq43STY>;M>CKrfa>}fU)y5*)lkh=`pm{Yigg5 z;yQOI&=eR~oawPF*ITK1v_9Ovki9*{6os#1EM=~E!QlAp^d`{faf8g~Gz)SRAJ2|P z{;bX*kR%?(EL*ac+9z;#AT+utm_Vek2rm_31`mCyC+dUyLHyu*&A)qHJS(5OOn-9P zfdkH_Z*0%%?`3{}d>zMMTTJ%rm#pc|_Ez+YfMAD`@OZ-JAr&kz)IbMJJ)qAOqSMp! 
z3}FXK^1@H%Yw!-=yoas|ECJ`00ldWZ>xo^IR_GIJ#bNS;1jCveHua!n&m*>iCR8Zx zxOV8vAz~8k(5c2&h;Og8%83F74 z-e}2yDP*;oZ4!4-J;X1_-aLNc?xMsOrMDM6gggZ3o;W}-!n%|fhg1~5WK<%>5vIAa z2=`>tO$}&oI8I+Y8g=l6h!QG+$$+W)ZpjjN<98e-AtZ8&e6B>nGyQO4)T1ZQPpgyx zT}cn97M7=ueS$jhe7NTtHJx&~>uqmpPZow#hfZs_-FknsT8J=k%zgeYaKU;VzseIDk5-1A;0 z%ztqKB5D6XhRw-{9+;Q@3?F(VhtqbZS*)-T?=Ued5~a3y7T{fN3pOg_$IzvCeDLmM zp^U2P+pMj2XdSgga0fC+yGXMHMAaFA%nD} zeB+(IcSjuj7w4R~UMn|sQs=r>GTG&1vAC?hrk3UaUPs*Y7y6LimRz}$9K7_6Jq)#A zE~iD)IUOLshVU~g4;|((z5WXOgF0e1z)}MOb7zzpkD3+CSD1Qd0?|&8BWI!4pJ}(e|U!`3fq^Z}(nr0s6MU%+rBYcT`Y@!tE1`3`bar zzlhthF6&8FjO6r_)lO_%=qjYc!so9oh;C9FhFP}qN(exjipOjRQ=Hq8{yUQSE(3~EN7AGw?Y}eE5d-t>A zprAa_sAM>X;gYrsQGa0nK8bA}L7p7K&)I?5j!~yBNf#?lGkvmH9rv=$x&Tg$YFTE0 zDYdAaZP1Ozz(~g)+~tZ#j}AxW4jyY8sHAv%+JD_kJnyv1j&?WuMTzC(4~#H}niKmC z=9l&6gw~-=%Y2cx!D-u2F&Vmo7CfKPm4OdLYm`r_ZE?G{34CbJ&8@7HH^VitFAEAD z&(*G4n0G;v?m_0K7?xt?vMjnl6pM*0OlbYVzcakgdYj#d7@1HyN{A`;efTS7t$nSI zaiAf98xy%{a3oe{+vgoe{p5~!#*TKuBHAgC;U#yR_&F7egs5F39Z6ME`?X%qDcV*g zGAcCd6S_g%6L~_BBBjjL01Gn18XXyJC*^mDGsOi@mHN%%ps_&I`(%6aS#apJ}q?=lv<)_!4J;n}1GkI6!z-mkHZVD<#@*=|u6U{+ktX0<~2-8-$v{ zi#zhLsKm(Ie(H1*Ugl1=z?{4s_0DD%72Ip7-VWkcp^N88JotJnt`BsPVY{M)N1@0F z-H4ohhH9LjNNwJ)LUUk$k>@-Ur7R9J*L&`vcGu8J$qNK81OHzn&roh`M-Hk1(Zgm2 zJoVX2v^{Pv=?YZUP-dbhz{YBV@nFH=vohv+Z!3}`O!J2?g-;!FGGCD$*-OAY(kV?V zMP((-R$gLlqa3u1>HzkCucOcGSNtXr%HG-YQr@3pi8<7k-(c3eWgNv~u(!?k7rA=& zu@hhB-BwtwIB;zK3SLL06--R>x3X`?}M)JcbV!jdZW~f;=V&NJP5?Q(Pc%m<=L9(;9) z!Vr`l(Xrb){p|&=1z#@BeeS5#bQ@(%W*Ht4;;KI&G74l((R)Jt#oOR@;|4ut%S8>p zS?bEFp5o`1Vm>px)+c3+Q<)~dU-TKfeATY}YLVygrW5FighN=;qVE3kcYoQEAbIpj z3;v@ZfOazPLQhB}TWXZ>@9QtNI8nk@qSakK={aizwYkyTR{^xO9o5W|HwA_z6FA;8 z^U%x5j3^55r;`v~#~9NV={Rzb5(XsQUe%@P?zvxw-U>DJd$$FOlKe>~7L^7eLd=wm z8ePaQ-Mb}eB%g3M=c1$1h6(5fr?(#8IP+_TPFX1NN)NmuIn?$vbJ~f8eTY#mP}P&A zeNEk%Ab`WqF)qB-5NaY~ga>zc1$#oV=mu?zmkf|4Fhc-mtb-?7g%k8(R>Y zGeWEr4esqj^r`{JHgV3}^<}yR?YVtl&_ZMKp-g+h`<2;Ii=DI@TsvbAU-ALt;}W~utTRtJItFjjqOF>DDJ17iQVe&c||UMXcl`$f6BlWMq{*p?NU(T 
zfO4I%nafw{soe>YhWj?{t;SFs6nqg2!?W8Uww>1@p`ucbECj5|FyrL@}7yYagwO!o- z5y`za%>9wow0LWg(NpO$@)NHH@qVGCIzJt0`aR!826iZxi!iQnR^H_~u)Y}5gFDK) ziMh*Ku!n=;kKo zu=r4r{d!);4EJDK&=^NN&Cf#?J#L34>z7VT2_2c5IvUq%dG~&ywGz5t~1KLu|(6&zRjTZ*%c;NbSK7U}7Vs(igFmT*nC}>vPHJ zH06%sD)&(3eglDW>{%KGI=SvNCZmsAUcI89wTx@mxp<6>iGo(nM8JdF)k-}%URu;$ zjH`TLu{(nP4<$QN_Fv+V6t;;AkEa~W9l3PuO`ky0KoSMZ<3V{hR)1P%EF@ z!y5Euo(g#~z9G?&uevA8-066p%iD70QBefsDDqwC49T;dp1?GCHrC=bsmX(M_F_a; zdQk*j_HXr+T4z2;_Y+ISyOcU2!rxvXjy+aSf{Lxqd}F>$-k#Ej zW7dAzbV*H5i1i-TnMup_xd?@KYz=)C5txU#v%$Y&ouxHwF@wIB4QA&=lg2!aFP|a7 zwD2&J5d)XK$LfufX#x-9%dL8^8t#eWPwhx+^j=|_B`>!-Y(O5xROLZw9d+NF*WfY( zS%A3H2wBuIMCXx9;}*YX;f%kx)!~ik6+6|@dyKpU76izCjE)#7V1g3O{D3Ia*}r7R zpmAYMomio;-I8HZi_mz|`JH1TAx?4WA9v{AP}pnoYm+ZChpV579PAcS`GFcn#j8A- z)2@^j!E`1G1wOpY=VWr$JW9>;)SCwzBR1D-e9l~QCtNvejh_&AUdJ`Y99tqzp=uJ6 zON2Nva2g2h&J*BK(ENp=*lB_R@ z`69p5ss+c3VG}p*E6;dBzKD~GbC`bdKV`yzzY`lkQn?pz{$fCq8|G7~y*as!G%Ayq zu#aL|HgvnBdJ@%1^e6@>E1X6?OSWYN4$d%a)mM6C+2l8a_S}zuA~lhm5yJf&#mK0@N9s?C_V{# zF5V~29~#tI-GY_qU0K#*4kcb$KL+x9t5Y;(xS*50_ToZrwI>RW+BZB7@RIZ5@GOB~ z?B~k*N!o7@!S)?br}~{~A^G}Ls%;(nq|Jz9mSX3SZDo1h<1kP5UH--2FBJJpuppWI z7bW3er||@`^Vj_vVkIO#;#Ygvul^9>FY#5{LBPl@ORqwhnl!$lJK9FuFr7B;gt?}Q z3ikE5PL9KP9Unxp8HeS7Tv88okG zJkuX>C4W7#-H)O}+9J<_v$%f}vx&ITgKX0FOoL!XbAgh5J+Sw!^)AThkgJ@a(~2n-JM|HcYs_Rs^xc{<3-}jr=l{^s755nus)O zWrtr6SJuJi!}tJFD#B1K`@_0SAWW`6!0Y!{OdxUh3ycm82`tL#-E!LEqYd!>=i`UJ zQ-+m52C6o6vr%Ao-3)4B`ep9V)b5!W3y_%XMvS8eA*SE~Sh!|^E0esJ@`qdP_wldh z&|}SsK^N3>zWdH$W&Kh6_Ml_Z=^wEke5@EiieLgFe?MrVBt7Mr(!Fcs=*hz0rS6lgALXc;H8edNEQsYBWG%zC7LCjri;XVE0lCm{Yle zJkh&0SL`uu{)1G|A#MHRwHr#la`em1 zvd`$;n*y)X^C!^>m8ZfW^dS0g)(-eK2e=h=|D-pHJ}pIxxCkuG^mcMw^ zW3|xxV4vmEdxC-gvo^9Gh%epDT4b-PZvEb;k~8SHNIPmQU*ps3HebxiIapgAx9(^hS2*be z*4ZCnhAS2ytu}zJb%oTES1`d+2&sd&94?bp zkXm*DdrAN2xA1Hr8(glsJec=*+z8xW+JJ+x>!&sLovoJ52ur#ztOWQMdWdxgV)q2I zWd@@dL>t$dbfc0P?7*%~x5|@=!WGwb=pT<_KiRs*s{2;K4|9eE*?8nMqvVI5QK7!Q z^ZTcES9&!i0m7(~4H{xXfSHwa?9BCYu<<>M7LSloP`Q`Rc#Y~6VSWfq$x}K(Qyd-_ 
z(;9&KAwdejf99_3k;IPEBoHj#k>5}?Gm#YLb_!kOa4o7wUYO92e#d-1Sb){pMb z0=8NBRU&2Lt!#K7nXpmvA0~BuHco8q;G_NhtdSkyUMA;;YJj^1E}a{m_5d?_2ZMpj zd2%)T)Iv?d;J!$lWgJX2FIY28iPFARIzt*lO#nVqkHPmh;hILkHaKD<-X{o4FAMqu zFQXJ$%mA2Ewrl=tdvVVV1(`OsyDGr`{`{Tt_safbi@!=m6MlrqP1T%B@yB~{Y%L&` z3lJ_B;x`A*eY?7Ph~g5XSmsw3$p33KH?--+#>pmjmmrzRNBm4JL-F>i9do0P(+u#>> zdF>fk;k$60Z2x$#y$_`>07Ev! zU-b34Y?yX~n?5pXOGEUpNRUpu_p)7&r4KTYh^TyaUacv>5q4*{KUYN7{0SRicX%)C zJ6r_Q+iq}I3g~J_`Va=1Q)JvDJ7D3`1%4s3F$i^FX|}%~F6Rm64cjyQxQT0ULX?b_ zz;DxsgZ@$Doc3XK%vSw|%>P6bh^U?>=b{UnQ_7AH?@FJ$ZPI;cod~IfzhT?IPyQ0{ zERvJgMCuUV;4*vg8?&wDsl=ZxJIWwX3-bgcj+3Dd&M?l%0PMSz+yMVR+pxJp<^z{j zQBSLk9D!3M&TBe%GzKYA&fR+d%o!LqoTlPg+F$m^$B2Ow@=E`VQ299caq)(t8}7g$ zf_$w-vluegk{>q42|zke`0^IWa;-@PocrMQm^lHrEl6YprX^X(z~@4qqy7@?SfvXjkW!xkQ^@X&ZCOI_ih1j$%9of3)a(nrYPC#{7B!VT34exSLF?Q6!$bAY6N=JjkCZ*AF|p`Dj;k+q zfv^}m(OfWY{S36-o~!76=6vq zorl3|7QQXz?&lic1;q3Ryq$AKyRyw^G4*i8@9Zo_Ub%Nf{}b4ac=0g%6J6srcz9U{ zSg$3k@~j9Set|Vyl2{+ZhUewORa{R;q6v?~@(6{p#8zfBVaQhiD4>Ojc-DT(5?^nV zV+gZF1AGu(SJXMqAwhwlayos}k)Mi08EUiTT_@N-Dnuw!MLw<97H$7f&Yyh~pZp_G zrujae{vSW_XC*1A%_jLoe*!CqCRfJu8-7El#~&N?9g(fA=X@iM;cc7-oFXem%<(Qn zvrPfN*E)`|aSk!MrEUeMCR>i$_ID~@d}}jTys1e%gwcZuxg2l!r+ps<86;H8naB;W zw~8>z%P8LzSGPF$%DXTdd?-|r==oz3Ww^>~S}|$mcRR9bAM-rj_X^yJ+OkZ65JhSk zlew8Dn>PcuU*54*9RFb^;TEC&lq3>HEvNH;Jl#E3vvjMebXv z+Z)xe5p6;P#xRQ|BCZBq zf=s7IDJl89mJow9v9GGWi|m&-!49k5#QVOo>EgaaCvfPa*t^kHmiS4+$Uckf-@wqp zltJ#m^^)BL_Qx*YDf4~H3`BunlI-`Cql=lt0~?6LjA!AqVRd&$`=NG_;7=EWx|e#D zzz1{np1$$@hFy#$Wpxk_4Vk_F_E>n;DwtE95zI=eXCv*tIsnIpmC^;=n>klI+W5?U zvyljQ>x$K9wEGpL$D{jwi?LFm{Kr-vJI`;QWtdkNgT_LY-d|q&_Z0~D0n#iD1PJz7 z*o2`66?iUCyKlL_$(p zY}l8!fw&uu(n!};SYdv9Bv0^qS+-GY&6Y1g>7>X~uO$XnN8xi$jV>8?tQ3;4XbVZ@ zxzA?ilcBXUyMljBO%aq+3`86e$dc6PPsl$?pY+R9W%dvaC*MTx@M(8+wI6HHV^hWQ z-RyeC{gfSaT;o4|JvivaAA$*7Cc++swPCdOU&5C=9nVeI8%*4d(8%h; z1s^vdC8%39OE+HM=NQc|^4GoU?g9JMsZ?vYj9a=X&ThJkp9&pgf=10HxNSpVmwF4~ zeZbfX*fiyu6@&4i^lBh~(>p|`YYSnYy0U2of=3vs<@t+Y*t^3gT^=vdR6$MW0!Mo8 z%gK5FyT6EYaQRs+4Q5;elmp~J} 
zN)Yv(Q7LsRvj7#d&#@YL@n`0U8 zw^*q?0LQF_Gd4N&DZ>dMh&)n| zM_?x4JH)Z`69UoD;wdM#epmg+CLM+KHyRZkc!dRN<%yMurJA4Q4iMKcA3j{^wT{cx zY0f`zhT5q$ZtyQIK!9@BDtyLfnIF{T_u#^#d9w{gx)3^+Xig^Fq+EFyz-;w$fJSy= z=vtMY*iY1M@J**xUXx>F7BX3XzqAYcS32;Seq#sdU)} zS|6xd!5>riy>f`PD{(Nvn?q&`R-A^eJ%AtxHa9FC$ zehcTyhLgXVe5*x0QB%ZU7dBuc6#+I%?}ii7ye6wL#@?giFlr4S-_N}uAzAsgrabn|y$aW)*U6;>!Gi}Uv~7{fbY%Wg=E6}Q}Cciu$U z4r7`H1j}%xn9PM1mE!e@4_HmK4Pd;K``m-X*Q-#aIBnQ#`YmqYuo!mxOVQ#4 z>mo=z<~5cCY@|C4bT#Jc8g@oK-{2hh#X@rV8Y9rB5xmML&?^ND`mHK*!6WlmzZd_I zC=$t}JU{v4@8cJf`n=@gQO%!~~xmI=%dqEKH8-J`^ZTp)-+Fui~Ytq~Ot zZ39uq-ba||-xt#Rx^VzHXaJpM>P`VeKq=ZxqC=5SbfAa*$U3~p-tYtGN0Vq{`gZOtNn6D=1^Log8NM#}e zS4$J{rO)onFz8m1V>Kq`Ky}w^buq>G-<4$~`fMt@##UbtwP9;i)e7qDgzt6Ql&-+)e zs0rXbie&MHw0c3PO)k;}%9`rQgq~RJO<41lT&G%H8;$)46$516h~8lk_n!v)SljAT z;T;2|o?rOxFrceWzLNjHJBmvpw?)(bmt4NXs&>|_I~Zl&SNQ*S|EKwoebgW{%2@fd zYt9~hH4kXbSo&fBfa*IX;fTD3Bi{sg^mf_<8Amv05ESnj+~Rfcgf$AgcHbG#D`G1KN+(DO~6m#HZa9%yqjfWh>-FMA!abw zWm{Jn2kkA5y<*K2IEkHf%;^HGQ}@z+*tDGIDxqYcyzA8u)Y)V>=>_hW?6ExVo`Bq{ z2gB+A0gCdG^*Q}Ch+O|c0X4+cCO`yvKlNWjG|mW_|8QF zM8gjpS|EG8m;~l=v>m<9L(mKD#$1lZ@)`$hMy!RDfIp!rJjyia z=*~VJ1ito7JIGa>f*1k+AB>$gm(PwvECZrdF#|H2VNE@f3Aqb9z*;;r85+o$GeGJ% zWPxJhME)3HaM#eiUZZ(mxm2^-`bjV9|3( z?acN2Rl^A5#a2_pa_u{rw&5_(>5?zD3Z7^~)bFjbyGylEKSbN|!_aJ#Id7c`qn$5dUHc3WuyH{Tb}P>6$Q{uTNH7LXAA4;BEiBD3L(B%|&%{u9ELg7Sd+t>QgsK^eY4I4;Bm zE7`&{svCS>N5z%JAt2+L^GTy;p_aukHL4eS1y|2Z7k@FW8u@8-uvu6rzvStPQqQe* zXy)E_Kl?RJ$YBZfM`?Xh7n zKhB|3YZs9Gzh9e~Z24ak6hfmUcYw^B^H8lRV-{3(;FZgS=;L;*oVv42H8OE2%|+JZ zP2HJl1%xyW%df|m0j$mi&zD}I5d^6g3DdCc{2BwtmANX^5= znO>3q0wSEaH|!i210UP2m3U4C3+o}+l~fBHA9DX^nuR<_ktmz6hvfJRWz;-@r@O#e zR{n-C^}a1+#DruXAmxmP)BIQH7wI^h!T~OZ%$hscv*K-({TnN@=1`5k17e4A1Q+%# zjHUaOQl)7VITryZcnEO`))=7(Zd^Ny>!vo!X5Rli-Cub3=ub!@Z;rl*dkg#VTR=qK zQcH3}{fk+o%R#8_XXY}}qx?lQwU8Vi3IMPW4GFIFEy&dBbBD&_gX%@SZmw&&ti53Z z#I)iw*V4?O;yvaC^>hU8w=;{vTD-qsgyS9ParY!cZuFch2M}MaVJhP(tEL^~k~NDZ zIKru>k!QDjW>P*9fyY~qR6f=CLa2d=Bl}0xM*Ga9c!gfyHAd930cv!$TizL(+v+ 
zO31;OsT0Uuj#M?BpQu!p61q4va5nM7oiO>08ZHI)!Z>Drd>dz=AH%drPD=J`OxdElW1#4O}?EvL!$CsYJojc3yDfkHO- zgXQdGX@=aVyos}jrcI=-^Z<(qOaf|u1W+nJaV1kCGr8Uo%D?Bp<4`5xYMx5aI~zmZ z0;+HWy&=S9>@}ErEnb#F#|aM4&k#zLdvh8mQwtM*OKJ6ht3Mmo5C(@}ABZK&16YvM zir)2Wx7~^c>I#JIW~SOuhcdiDv3pdEgWg+hZ`&5Y>h=CziPojifb?kU%uKNmLeDaN zG4%95gu|qAc%0CWQMk&=BKTIfyuf~;XXA|j{937j6qs+6{ZZIKbshQd>Uz`uoJq@o z`BW3bhwj0pVaJBVQf_{)V^mdO3=zrEqdMTR2?qWdRHrWIJPb}P_B8bs{ulgW0_}sQ z$ZJ+gqA-;UWHKswftJBhw=rQ+0>){^P=QjwAjf5MUha;0w7gCE6eOZ{G`&&meY6GC ziY&k+bL4Hk_`UjIlGAnqgs|miO-c)!pYrv!I0NZt@aZ>ZyZ!RgHr!7WFv0!KD;kg+ zI3vJi-j!Jmch5B3x7$!486?b9lCYkWE^tPw3_e`D9g=iuWtIbigDfGkG)5J=N-xl> znJnCWVc9G=dI>N0jQuPlTckfJu>g1E{#+rfl&C#U;0Y|k##|a+WsZMP{tXHeoPj6E zmAaHf=kbxkHb!IF6zIidjtX@PhY}$V9`tmeXDNg>M2)$WkR0ehVgTZzgg8h)3xc$V zzaa-Phg^ZQoA{3q(-lbVoaRG0sCLNfE}8lHT6+EG#`LW zyIH!pGA>g)kwD?GJ(HQFS2z6XG2IFb z-6shpsW8c?H*}v=@X{t*dNsczG#U$m0ah-OP8>F({bH*-uE0i$SSVrhM9rpphms6& z965|i#F3B%5cT7>AjIf068PK8?0GbpvWMn0R8G73}x;i}ge7;^_C%o*d>% zG>gGw-I0hX#$U#c9%zdzrP^5k!53d#Dys4Swf>go&oTh~Vp~nontc1qBQ#(oZ>imT zP7LO%HGaG&Xs0ud@Ye;UC}j$WRGW1a$H-WA9Hz|6PnkKb5>fbbF$=|8I%YA>K_?h@Cc-_9H3X;{N+LA$a>prDEAd%e1)8*tEYS} z>{hRoDy4Slb9?QO_CnPcx9+(HBJ=T#6MAz6+m>@Hh!a9jRhdUarLm$e3Qd(}I=mH- zU0j?*?Iea<9_S!$zMj#!ZK@;m)T}w^)CX}LA*M%FY%_1;vst^7nJgd@QPwVd(N8N`uA5a7)@fI%g;3HRUFKV(fh8AZDUJ-#^{r+UaIMfr8g|goU`#A z&eY51#c~fSpN*{!s4660Uc5OaK59-Vx4P|Xs`!IS&{3zmV)A1C%Ecr27T*8^7tzvo zg2Y0^v3wfJg|)+1GnI8#WN*73U^;FgKV&89Kfr84eeS)%iXEsDzI^qY+Rg||`=vI) z^z{BxmJF8IOWFsPP0IStGbe}RE)}|ZY5bx*m?B5{7@bwd45{nBeY$!-MJZw#B z?6o`@)7s>6soni)uVfOw=dJ=aT>r?CTZ!z>r!>mv=zhjMR)t_GW_0OGNIb8Q zJGm|oKj1219wlk(WtgljIK$?)W>afML0yMkUg9<`c`*rurt0srR#BCm{o9V*j?cg? z?=j3&Tl8E8nDLy}uo6d#9gkYH0F(H#z{S zP%oj6d3A873O_>3NU3`Y1YPfvC}flp65U>1l=|+@dOT>{Gm-;e&T4IMuy2+~*>H)i zO|3MWT=?>8k&{GAinQvYtHJRAdM+vc4`Zza=bInzw)#$_k!oJcTnH+}jdkzG=6W{l z>rRuQJ*t8$I-*x_`p2uPne1B_72NF~Be=K#r@HSQ(-F_5_m2kJNk1LV5cbZC?K!!W z;yBnDd(ppnkLj08KaS-};r**F4<6e{_z*lCMT)-R8WvLz@%Q2!?g=97&TGwgp2fr-yn>Ov9fCbelW~HHrP? 
zOD~6$lm980l<)g&bcZHtWtCaUYZK<=vqeGFbbhD}2>hc{`qW2B0KbXb96zv6hBsBg zWumqn?QK~yFU8Mwt(ehZZ-wL!{?RfEi&K|{V(c&3t}l*rF6Am`Zv#$h7RuY5$Sp^7 z1%_XbJuWuIN2%>15SAkh@^bH&E>3Ru_*Mj<#I(!E?)XJn&$STbg5QP-C-pgd32j*h zR8Oj#Yl44|>@xu+vf6#x%P3F-S-v!0%DGuflNZ?0m+qRq?r(6ZR9`&U1j{B%-OTQbu~N;lO~tstSKo#9KbFL+Kc9sTqw$cluH-hYS;+p~_K_ z#421oIybONHo&5SI5tDU18FWrN^@_N@&wn-pl8>2SVg=lGN*KPq>|Ym(Q7k)nWwtM z7sK}8C@pu1j1hJ8w_F^@5KCv+>Qyf>r4$?s<~$a6QSQ^ZBtd0eA-c~+Fx$IR@0Yng zSCK@)Lo>)Tm18)Ht#u|7UXcB9;l^A4NUTZ+z7P>|a2nH<@%vi}yIrulbTUyqHyfG9 zj~-y8d0|fbg(T$Wj;?atvptQyqCQq@FbkXQU=@u8(qTmJV1Y%CWu>0}g(nwoNJ^`+ zm3`U&R!yyg|M11|`;mA@pRC%@&v-9`xPW#!HfLq>e@MgH-8VgZu4l zU;HX*QI9>Z7-s)mFh`!Jb>|;pXfPd-BW)==HGNjFD>wH~olg}LX)aqCR+mjRv`y$B z_Cch)BPQIJnSFvzMeIcq@z_s!@WHjVZnkXnLXOUobSRBUKXh;I3CApuwys#XO+H>xOgZl=wbe3xSDBFd&oH4dm z;$05$>L=k+@h7D67Q`T!Q5uiCQvCDJWqxT|t_@Y@7%blPTw7?1sZov3657`oHm*)ty5v<~!;4 z@*1+0+L<3Ny_{j=#RNYhMe>7a0(wr*FU8+*p|SzT)?JLUTfyxeC!T=$dG^kyg6P6= zi{1apJr&ZE&@YI|fi?0AN|}7^r&Q?LMTm3SlL3{1dk5)wB2Vk#t*+-`=YF|fhrx>a zRV$XxnV`avm&Q@5MZ4XUV_d^g;jve>pQ?$Oz|g8)9I38lQit~cv8vDIfk@LtnykW(XNJWgaZFh%E9SA1c{OqL*>6<6Wn(|dQ)Nq<;V zWH2bl6;jW3bo{#II}%wdTFrWm)_;Zi)IqQ#|8cGP=K@(`;1MR_|6)U?I?dl@upUor zn}4}?>fP|l6$H867f=E*YO@pj9u4bHb)@Yxb$A)HyQn2e*d&Fu;Q)+AR~GQ1^>i9A=AS)Y+;s z8VESH4M>BP%BqT*S@I>}+KXphgvM#)=x0JsV#|WSL{(2S_H@qMwz#v8Gx%<2tBACT8RG|khpA&o97S41$|WPhk3+hx!C>ZuFrW(UX?N`yYcNB z5J>K6)yePPvgqvdad~Sp|qhm=ZWwn&`n7_CHZ>fzw z53`=%wOTB|xS@hBey1~tLK~sL<#Y}Qc<_HA^{VsT`}0TsA6;JoRpqv|4M-!pDQPJc zFi>e}kPs0h1ZhN4I;2BVxH?A-?gss-K%bku z-_<9K#Fi$Y`7rz=A_^Ebmv6xuywIH%l%rL}UC7F5(%YgH_8^w~Mxkwn-s#v7e*yxE zVweQ2?R*Q_;Zw*Nd*O{?#)NU&cJvtmw+++YqeE=s>q` z&2;&2V8vDIdhZFCEwB|DfE`LCZsPg=hj%X^Ev!Z{4=8y{C2zCZ)H4;+8rjX;I1))H z0EGm{nU*(4AjNmT98S$BFjoU!GL|5X{F?WU(*k}HLj+ABzGWNc6Hl+1v9SB0BmYJ# z$KC${ai~@~M{0G=Yb|shpz)VhAoTcGEq=Ek&<&8wk6fVCTDP_KLUyM}h9@8tZ*dc^ zeYPY1qk_btlC+-%5MMT%>rP{11e)ZYC$d`P->1@J#6VbuNNY1fvM~{wuG@g@3_^K- zZOrSeg^UkICJ|?+Z(Ajn<3hc6{Nq08OATypCG#o27ZU|qLb9r%tm0pwBmF4eVU2x} 
z)|hK9$xKXw&G>VsA+tJfs)on=O_hP3X;MdI66Y7g3f5F+mwUXhS+_N%6M;3VM+1U7SM-3P=>@7l$AvGS+f+ofUZ z^@Q@tO7DUD0y?jiW}ZL>aR2o=0D zImD#qUy3--P$sbp2VymZqnPA~PJ zJppQ2h^>z*bO9j`ba=U9=Hd(~f<3orV9f10MU;uJA1}hGssuI(18}@e-0|+h6{p7p z>IMUSrt}brBXDq+0A>l{Bu?n<0Q`(SR~DgMFor+I1QpeJ#1=_wJM7j=?^ys=OmAqE zp8a3^2944~z4(~gCGp)!b%a}fIlvXS;zpyA9yw7$Z>lin!F;QV9|s{Z zLqzZ-aRSpkHZ`|dK7vMYn-A%y?texcUb?NvAcDZYjjLZ=&jmy97HFAzQ4KWG>CJn(lCs!GUI7|X`^;R)~H&E4wpdEWL6E~~{A(`&P|KXVB zB;m1c<&P+)@VGE(#6LWUPBCqhFYaZzhwyE^|JtuXATyrl#hmeyp7uOr<}#(D9!|Aa z_xvJ;83iI4Ud3_Fiu=vH0c6HBnmpIP3(gJNjmh+iJK&E}asYUN8C0h4+Y~;r*o0K& z+(>jRctkV(ypv+FiU{qCzX3I*R1j95<>xX{*9#zTbO73zR~dw`2++ns)-VVRR1*m6 zyNmDqh}_pxgwI`@6ni;f+P{b5vc#i8CL-3F1U3LqK77A#Of!6U0*PHbSEwM_$h1yk zeF7J7k(p^^X4>P; zTi@Nz9fkg}*8QW!VCSf%;W7u=M*Jf@oPu~lpf%F+1LC#>M3*v9An9*QUX|O%c=i^| z1Bvu<3*(3~fDm#77?zyRPBhAYl6J$FFqsuH$fw4?M*jGP znCHQ67tarOiU6S*eqnzWj)%MIRt0KEP-*B_I78lO4wx9JpQe?!kYIpiUeVeM2<)x7 z8=rX3aare{&-s7T;>93ZykUFE58wKJd3}4v&Pd2~AH#{E5#iz^gh7+&-@XyfZqV`O zQY_NbT_~Xg6WD0hO=MxwL^Cqt8L{%-FlmM$Mib@)$EVs49Aa&20b)D_A88#%blvth z2dBBpd-HV0(`K2w6LIC$)-kWDWE&&Q6U4|*LWQ9P*4lBlGnlONL|ghAc=_j9Jw?8) zKx8WGYmNt^25&lt*WwcE{DbqZGcz1U)EacG`=nY$XtY@bf^93;_ z!!DXX*0h$GVibrj0G{z}ux0AnKKr=&w91F#(Kp)CT0z9~%lY@9_(AY0eyDl>z5vFc z%6EOrI52nb_syrc_2`RJNxsHh7xNKDgk1VxU$GrIX;Uba0cfH>or$nsfn}-Qn7jzJ zuGF}Tk$m*QYg@(zaMG+ILfU>tNa?%6cWJ#Jj$3Vej;t#gVbZ1bt%)gR=9DPr_Ob}gshZ(vlwx_>G>0jT$l3`fs1gq!n31Cn@gS8>;L&r0nGzd!PJYp=Ea5I^q9jyH|@yAVC>q z-}u}Dsnlk1bfPp}VVeuR;>>))5ehFtjMotA28i$?GAarMuae z3=RR6FVV7VIyxj0f>pZd`+PB$AYAJC7mro3y&jZEF zk;Aa{Ll82*7@YbVi7+f$1nfa-m7CXSl6hFol7065JD%ukR>XG}vWrq?wcY*z?>{rF z!W7l$NDvHJAyA;?`YF6OMe+$R7cdlsc*&#F5mkx_XeN0TaNfkm!@x=0b2j6BD6tyR zy+GLOq*`VEm52;)M_g*6Fmk-W&}cbQfxOn_IkQ5CZ;S}=HVD^X1*~gTKIi|+|2yKj zQv*G8w#Juy;i>1wp@1H9L;(`pj8d$LP>-{QnK-&U;CMj*+j9*VR1RWHUV3uDRLdh2 zh*dDcY-p0$2UyayY{VuAn4cE0Y&;et#z-tz*G}+;KT~M_LT*+2kzpuoN$pv80m7Af z{34b*F11KTe{hRhfLmFw%|t*vnk4Pvd=2!ng$`HmKGr!zbWI3L6JEd+?-GR3TUUL) z?PY_W8ZusJ5R{eW0N^@x`vqM>Ki@Ow1v+7co5IwO$i-qP;cz(gF1+JnoGR#6U*Sp 
zV7y+1G&A&TP(o3QncVyG(e^yq!SvEIQJMjuM(ml9`DMH;wh=A z&~4rr({|fZCf}-h(ty!F4epjD2(v;?aO=@z2_j@&lZ@mzF7rQ#$~by z!f_xO4k7?$A?>c6RPc^%04*9L;%~kjhdH2BPzrl+zMm^V4Z3eKj*@L3qo+o1YNP!8 zgt1$#A0&7dOc^-*bOMg~Boj{OF$v{xTgSlvk$q;7sKd&2;Kj+27N5jtlW$a?snm!a zPbOHzdR@(;u@$=~GraF&Dww>Mr}v_h!s)}wt?uw@s&$0!>d5{l;c}i=7`v8weWX(U z1FjrZox$d~%o#?F4uL)?0@i3ASgI=Gy z6aoYF9D1!0ijv9zVoZqk5)^^uv{M7LH5fKdqM(NFV7edUds}?>ywEQF_l4G5ZW*#a zK@(nU1rRe3K>uKMhQGwDLc+;aE%X%k|wiMZpRwX(M9jK;UAQ zTR={Ikk*v~L>6X;+urc+WVCxpF5jK{k_Hp(MIQpV{*O9DKT>cz%Cebjv{#f2Oe2Q9GypGN+Q^jzS3@#^(6 zsY%UUXQZ&U&qqvAtdP-24{)TaeW2^UMEqNhx_h*F2+_f9U%X6hwJUN~DsH<;U7i?2 zluC+5vFt!-Fl^D*$JF9#L^mxu6%N{%M(RPcLj7BZiJF)4Vec_`-dojA9M!uw5qa}% zExOR$XxT;eO@zL0V*L>J6j2*4P%~Zac*|dZ8%OH=wAWgN=>2_qozG6&^r5$CzSp*W zosk&W(H+>!)Uv@GaNu>$|8RO1r^l$cOJ64LSgZ4MQTpYj)0oC7EOZ)2Rx&?tTZ+lb zs0E(5sDJlu3a@{YGp!b{x08Ra=Dh#v_4&$MBq9^yR!lbhCU@(ZF7rISsIclEBH-8Z z^-C5{$`_)W0k6Wy&IN>C$ti!>v(z(4a(kr8!IIgEvC-u&?@S6sGWle+ztdNWX2#b{ zu8bar296CRISW0lOaiae6Dn?jK|SMy^in88I5Dl<_~6>Z^l_G^K#zc=x%a_DnE_Xt z8qjnpn(@UQKPzI&(_GC6A?NJ4M1j@O76Q6=eQu_2Z#YwN>%=vb!P8peQ)H>&hrU`j z7Z*6U#kY(B=e5+aEeS>J#W)&_kI-H{`f+GxYhv>5?eCJu!AYH6?5m zXLa=)t^bk#L3MLWdIMpxs$QI{eLfiPKHNT?^6&OCW!EtW&bT%eoj(o5L*A$dJN;|t zz^lXw9d>b8dnXAi0lb3i?@oI5XkBRzXvX<>g(ebb%1+jr4Yp{;^LE)T=DDp7IEGn2 z*_oUOb zag}??)$d*`zUXt!}&hh9d3r@SdT*}MdbCiP6eY>2>^dF~}qy0oZlj)`S&p5&%P z_@x_H7}@4(GqFON`G__OO6rL2fT{KpqYlvt(E(9+z&@er=EF-?j6`Duwf;H&%i{F5 z3}Fl`8?N%W(@vkoEJ(xC1+U4Rjv}-`iogcK<5rx>Ce{Tk2Y3I~E2F}cgv1bq7w0d; zoO4IvoAzw8nzth6u*SnU&fV0v=keQCuiTHVX>i|QbX8F!X5p$=B}xGDLV8iOuJ6o%sX#eNi zOt$CK-QoqD3i;1>9aM)VEf?LR*PJx|`MwKmh*Tf{^4wu_A#JMeXn9J+*)&^(qMzTm zcuvptib7Aq4sPZq=*gxB3my(Ctp%4f;dxfB<^R_hX({$Z`JC}RZDz%lwR{%4LIYIs z#~7}3NpW(I7zO%q^{bOf&oOd@zG|TB2=PTtd&@~$)DeYVw0um<+ZTb&R~)My@NxM* zG)>aDvJD6~Z&3aA+pVQEYc}OwvvGU-V+gC6c#)}`yI^B${I1?Grc-!Y(9t)lJwY-v zI)0tH@j~Y?cI*|ji&aex$9nw{$Bv;ZKlS&Y3LI+|ggyIit5))}+%fOKZB}RFNZ;;a zLLT%xzAyg{3piTZyTvoT+&_4-EyvKX^HxW%PP<^`rnb>dTiWRg$FUKD31^O70lg=* 
z7334qyVk#OTT483NLHr2IC!F`1{}sb{hBqK8xorjZ=G{@Ojw}UPB>|>j_KBXeT6`; zl{{*s>fa$0F^rma*N$^J8hw?Ukw)e|?+7INHMmbAt-`s?aLiA`*NSOpxYpD zP}!~W4pFaEpcJ}y?Bm{U$@;sGM&XpMNh)BMLZm^|Ud=M=E6=81!6NBDDHi*DP<#n7 z7+B^3FFFDu!pw)Eoq@*RHNn#Q(%pQid*T%0R+ldeDtz)U6z6AV`h~jjV3VhdySC3x zKu-J@BR0E^kK7QWqRkmk<3q=;YmP&c?@)Q#Wx`VIN0PTycKfm(cMvkO7;f;e_NZwv zW=PYv3sgMwQo`h_@KzWTHPpwMki=W)77gsaOPG`0nheOXBCk3N~Sp9~QbDzTXc*nuEX=0mBj!JEG!i#~2w8GPG zmh8Hq2M5e5+||wInDyiTni%B)vXov=GJ~dEx`-IVT~2LD3pYOFJ67;ufaOzZBHDPt z-bJD}xO9d-1&yxq8E0#w=4U81RNSzAhX>Xqw#ve4rSxZ9UWKWIhNy2pO$%FYn_rpO zSo0Z}u+_$te7-4%er%U(jr!^Rly&JDwX0B8$uHhqN-OP9yL#MpBSF;TqrIYEi&(0; zUw+-;?F@S{Mo=>tF!%cRuPT?$YWbyuWFM-~Dp5~i7Q@J&o~H3hMII|*Gv&umszsXv zlFnd=l?acOtql_G#s)vtlyUqiz!GSi)3)p8k{IB5Oi4bXSHx*0upx;V_O`-v!8W(d zsg3TtWiDm@lRo^!LC-sd%z7M}j#3&Y)hDt#W6|C-)&9#8_9xEoI>IxC>H`i~aQFY$ zzR;m+CtK`CxBc`NWp*}^Rq<)BD@^$Gleg;}CYUEHa_UY_Di}{nZJYJ0qjb(DJK8pN z2y3vrTYfF)ZQqTzK3aCS+}pWr&Ys(2zOa%LL9D@T>wT${dy+zXMs>$k(#32e{Y9Ad zk%86siDK(drAKBp4f$J*ag(U5{=6^O+)Q6Uw_qrk<>iav*CPcPIY&WtWwo_>bF_7S&8t;z`F*$AnVW{o6**qO{``cO*wj{!D+u6Esyvt8V~ip$Sj86DNXspVewMm ze>s@-eZWD;Qn-R5rS=z&X@y!F)`ObfPi%txVvk9ieEFX;Jf&--A<`9%=t+#Es!Off zaR=klQ*ItjRDVWDD19jXLlcL2_ejGS&Y@L$Enf0ZBe)D)G>rm{Y`=Kr z&_<>h%r#XB(xD`~=mOjxxzFGN^Pb|`v+}Q)R$#Eh=jWNk zfv(UDiC;DmBM2TpCtjiO{lG6)f%hg#w9PV<`5W4&IkeF1n-88*`vOsvGDEjx?Cz9N z9jfi2xl7X*!b{r}3d6VrV^6r+Q;^b1auYC46>*FayuQEM&OKr^sX9%|iTR>0a_1_B z@QU^*-R_~H#N7r3`gbYrpZ$p} z&gL8BMJRZ|d*M~ohKAhfQWL{a(pZ-`HfVX=5xx%F013iE`%NI8l9RQnW z`M6Y#_9PQ0_O*FaX}EQxk@7gm0T_jjo_o2lN*L9^`ojPn#OAdg*HhE~cA^YF!K>z5 zlX9{9Wphp8)oTY03Ei7Q&4Jz80&FVT{^laCTPd4$zpgb3Jk8z+Y!~@7F3_OoGr6_E z&Ub!x%YR4Xiz(mk2|0UQd|BWyC1cr(ACZP}3n!9p$0zUEYdXq=md=gsyDW_J;?sw^ z=mqmfvBh+COnZFT?R)GnR^s?_qJ~vT)n2ayl{Gq0CfBUC7Hn1}0Jp{N zzUMn-QK==+~w*?%urH;9Tr_v74yOr%9(v@R|?8+hj0R z0Od@QTPLJL&2@K?%u)Ly_CWz}#q*SShJx}7W*8+n#TdN=pH18|54|;~)I)?7TpmP0 z@!;;g6WmX@UgGr!874!T;NHTvPfpZJomrTf7SbbXKbPY@dAB^Zl)tM~{0fs|69=97 zgU=jYeGMCiUoLo3qsPvUW}AdC_gFsCyPU=wdbS|t#s=d@3LKp76g^(M@bm<(vs4NM 
ztr7hh=5kjsF}uMav42A7{Tt=p`xu)v&>ghVb{7W z7{nx^5=~SX%bX}Xa`<882`B5x+uL%5IJ+&ziU%aWJ7&=4%=bn_%2$R;vNUAsnyozU zZEs-Q4(W*09~-|j%E8Abgw9U6TG4Lg|KEV6mCfXdTEJ?+y*WDs)R*)yC16KgQku`r zzuoORiD-S}{~r7F7|+C>99NTVuSd#oFpX(#9@KR$9gf-U{MzoHKa0+@df3;j@qW@_ zkg&^nqpyx*h(97`l4z3S;su8LV|d@IOS$xiYMNG_Qr=|XJ{j(l(sEI7$(pE9E|_)< z-Y^TPJ|}Qud1<>&wye}nckp@j7Ec=xlD|?PlYMpT-Bu}a&)V~Au#dK}5qZ2VRvp%7 z@IYfm1RMQkucg7~Lt4M!7XozUccicq?L7Jg#zp$jcQl*M%i-~pjy4>zshedzS-!Y2 zQpe~%uq3X3O_I~rf)ay?s8KGh-!EB^EGRkPNnIryqed_2+yJlVlH0`&HgtlrG%^lDZl_+23nIljxbp0oCId zKcO3LFAiD=uYIbWsEK<+f|1BSn4?>mNdC3<{HNNwGCyfu{!%et{M^L`@ETJeNYCAy zRzd*3xyEM$!>FXqR>(gAq07Okao|&2t8xEUBLxFHl2tDmfP@>gG}=zXFpv%t(qsz3 zn|gyAc8kV9c9&B!VLh41$*qafsGMqW%!_D1K#XA}BddZU-g@nXCy)pU^!rj7_MgOZJSHpQxWqyc?hv4vDPVF|}Tr@!O&xd$!f;cxsC<>Ej+wZN_+pOOTi`y6L6P1t4HF^)SI zU4?|h*RfxdC24y9_e=)q%{0y2>rf%UtHEJc{vGu8H#p&2WZtQ zVr48j1M(8vzxr{8>@nB~U#9Ps*2ioKw}vmTXr%<^{&_^>?Wf#LK|)lHWcq#aF^~)0BqLz#@F8(=C{~WH*Fr2K5N4QZng;E`%Az8xNx0lzyd>G$#|r^AcRiXz3M;_ zhcTQQ6G`*j*=ye6KRrytXO;jGUvB*Hw3X&xuY!RXEB`tGTgIG6nTm~!7}Cg7C1vA6 zC{};}^6W*m5D+EY`vD<}-=;ef6Jw+^$l*DQNk4&9RC_2z)LcCUaL*lxP90%|2_CL~ z@_iL>C0-q)*GG8zot_zx5HgokYyL0`(wCkVI4YI2|E+b(v(z`_89N)9j`v=uIiM^qPYy^&sicfeTzV!tpU)bMsDx-!*Fr zrp%m`bNgfL`_PMAbyn<_@i~t!Ba2)*p(oij6$lgj9c-HT3pjx>PNpfDh|v%i&5xje zOc=O*K~IF8c)LA@>)KNDNAy4D6R9=^aPz8yeBSn`{v16MQvn5HnS=r$qEX=0KtwNH zO*?R1!<#VOPmUZApIkG@Jypq8E#SPE`7$aLVV6vPG=A_W0`mtBgF^R&fkDmcj*!!E zogcsb^IyZMFu)_A01O)jz#uFk=G>o`C?bXdfLbAlY%123|DZT~=3pX0drUIm-50Bg z$M&xzcz6Kow>=?eW*8tbJ%mXhl;HdF50h^BPCS&sqhv9x128ne-+#n)>g{`v8siMk zxniJq^nmU#o?EY51erthd3Y1=9sx7|pl*8^bF9haWODam8jDzwB=P-ko%9jLm5O39 z7It?xtO!C<#5RJjJSGv#cyO7FjB3PKgtc4zwc>O#`rbkAfzF5K(&nO_J4P!;FZvej zw2hn-wz9X}6KrV?s-Vnq52$FED|fg3+`n^01a!1K!PAT$Xc(YSM}Cu6^#oa^DtP1P zfGMDFj@JfC1PvV%#iEE_Q$GkTfmncna#Raa?&V4RKK+>&1S9a4pviGl%<&>VCQ5{! 
zJ|Do#VK6>4hu2h2A6|&Y;QCEJ?mu7RA!31VJXefBy_L`BUd7TT{W$%oWO7w75r84w z7Ijl+*^mZKSywmR(!DEg-U8q#7UGA&wU~d0|L*y9l?kv^vG%r$#!D;BljF z=6|y%)^KuGRG92) ziJ5)XCH&eQx`l<~w*D;2RsN;o$hK&X=HfsCEKf6X#L&a^7ij1UYch+^d%nX!jH^MA z?1M@$!h{!ti|Y!oy$aZ57&dM^BE9c{s+Bj3^ZEOQdWbkv5Ug3=jLd5Y4&{0k)KofMN#e*=W}R z(zEfdyQ$%ZKn9g#ph#jSNym8`8KkK$H*>(FY(K zr*KHc`*K9j7t~&`^zdz8n~^+M?N9UiCyp-W0r_D6lW{M$hDp+tJvHTm<;WA~logy6 zyA-{k{R5b!gJzQA*+y5__wMiTQl@H1WpmHf&dv0k1YbT78tO1AoRQ>b+qI?J8pa8`u+1|s5*DtnFyUBX;aEzXn^3QDE|EvVFbpq@@J1+7Jw!A82-W?aq@vm1xg6>wa;?r6D@{ zcyeMwN z0)-^DPZze(CH$2X-?d{JEDOZK8L_7`x~agMuy}hdROYwKF%F~6s$_~MExriZ=WCp2 zFHkOw7&jPCo!2RB{`~P~wCR^B$>Nnc_q``PD)cmaTTjAsh0;mt$*0wVxa2QUTt&zW z#UI|&xe}j!>78tyO(vXz^;JkDbW3RLDg$40-q~M|@dLI?CayBp64s7QtTnumJZ!E# z*4$4!fpRzm4%`M%Sih$k_Vfvm)BAL6;Lu+h?T=ChqgGzzxiOE5pn109y21*5%DdWn zQPrv$q272&1eY$oN?PZS!h}>O8_A?gVvfF*_+n(=)xzvBxt z5;&ISKe54An~uR%d&HbC01e|SzVVxMU}lnM}aI*ehc0_|NrO;lssj)OKI{^RHTj;R_C{W^@;=fi#IkRhPhO?^EEiiHCgs>v~wn^njYc zVg6N8lwKMB^P?}k7yog?XW@E`&?2t>b1q)9o(NmM6oi+ll}7sC$QiNT=@ILtZ-S%HGwt;F8!=Dv?=6H ztXAN!JDeoyJyP^Q6j`};6A`DoY&Zw_tO}rfiRpcAl2SAO;}gRZV106Sj|=(xJK)=* zNy>q5WDX|$rQnOys(fD-zF#DiAQ%!O<@lF4!KTG6bjN45)pa27-d>`FzAwwEE<4A#RRH;&Y`CNX zg%M2*J`jFc-|9LZ)%1FcSmV#Gq=Gy1C$N_3|5bgxUkn~!q51@nEK0$;VBBX_&FyVr znYGXApo^t3<$sR`5qu1U1BEziUN;Yf?q2Txt>({=UY79geQHq+P!3$C{?;Vo3aWX=J8 z^Kv-|Br0ltvf@s~h(OKPiUgJF;>3dIrJ!?UGN5$iw{3(^-INeVA-K-w#m3 zX|C45jOYW>93%f+YUGH`u)4hq6e}d};nd3+HbqXXndBq4zlUk$rH#R(&^a}>qC463 zL+IkA&<-Jge(XiSW~ISBE71fL%5mhFShka9m~Kk#2mXG{Scuy{c!}g{s2_vha4}F< zz6d`2PIn$cESB&(*b}9RY~?RNSvC48n=H*r5Cz!qmI!Ly| z=m{+=Zt7Lom4BpYX8LQALWw{}Z+i_bglL!K?{!wOo+gf}Km1XqwMza#hKf zZU(X4r@GUO|9GipY$CWFB%UH&_|;H*5oL$Dbp&!SUlK-a>?#H;_p5J7{Ir9N>m{HY z+Pi!}k|n^Dzqvx~b4*t(p#(?7R+S+@8f?!YRv{O0T40kOg}{PRy@pr~ z5+#=^u#L`w#utu|x*+V(8!CGSvc7gDnS4{c9pGB0X&G57TaDWF4+7Vz`pfUlqOOn1 z*Z6<=GiA%g^OZujAnCJ6Io_X`Wru5Ik2Q5c3MC$)9rTTeq;Ur&+4FXT4e4a>G5z{BO?c zIXDD@ckH$x9mFFH%P5`^mb>$tl!v-5-&>^H-GfItY35N!Tmlavedhy5H(5*kr-3QR 
zYJw#|m0>ez@U~)b1-fCSP=3dY?|Wi6;bT<5+L9RL5W+2m53_RU7))P@8#j!RlP)^? z(KyikmIEuFlFH*=f?dJGOP_cCu}H9Bz^h~wN&6;GfS7sry_Y&)=2JS(qmI7PnS7UL zVFyx{er^|?dpW^30d`&a;K0etRAi1A>hs8yYgX8tX8Is5QrfS(f^AC_&=(th31o{> ze@MD%NhIe*9jSDU4Z{Lkd!Q=12ca3WhDJC+UY+naW8Yr0PKI{Xj?~r-C2GvL!h%t1 zzob44y-qF1oLkDxEcK5SX7i${@$mhbRG-R9Z&vGcqpE4T97ODN<+J{|B#X^5%*|Y>8O#k(no-*8b~4=QcgnV#bSM);}j6lAyB)W?Lh_PE4RN z5DXhO_WLy6|JcI3gteOE?0viB3##BDLJ+~_;sm`{_L_Uy(X0EF7;-hYQqaUjKSpWv zFAhP2$lNd`U$H2ZjP0uagAXH)lOYyrVl@=+pKunkJWzg@v1+?I(O|z=&|FzFU-F?8 zV;myhe%yGIcT}!?NIbW{3nD?AqHlDdY#~oyK+5qTc+zq6#A-`y)c|LdZ6L}M!>vR* zc?HbsM}r-)Vuy4zbhC#t(l)s63aU#tFTT?YwZ2A@Vf@AuA{bhH+n^(BClkMLY`p3{ zWCtpuW;>+%YbLqEKDx4ju)zlS@8#|v>4x6kexnE~f*adZZ;KKbu#SJ;%pd}p;J$9vqO@7NlO&I0#}JmxGX z%P&}ho_k+?KIVl*Ffwt@cVSHJXHCL0-?jCAw?4N=T^sKc$qCOXyi_wq0T~}d-w_Uu11lj_ z=MHQhQsC9Gjd2apuqJtG;7@pG%_bp@j;Sh-W4i4Yrm%9d3YPz|ev0j$rB{>~!yN&F zxji_#Rz#1-;ALN)c2g+NiEhVVC)GB&Qf~OV0~Ey^bZsL2*;ZKI`1w7oF$TFFn%~1s zC0&czo!0evZi%304CPXSZ9!iBwl;XWx0A#t<>eQ1qen#*e=q~EUk^e;Xm)LxZnC~h1C52(K4Vi)}bl2I8#+|FT8Nd0WI z_Wd36X64M9_>7dhPDiwu1>wuinl%kZ+sisfJ&AJOeY-O6VbS!D zo90AGx3s;vr&5j7b!!8Mvcq*c;+B&W%g63SA=pNaMYOtlU(- z1)P5-yxauIW1i)uO`L2bo|yHOK;m9GIs+A23gz0|Ye-mWL%7MyWw{G)kNWZP+;6pj zuyeE{EYUjC-NecDzv^-vIEF5-goE1E^K?e8o?rk_Q38&dF)Gd}^x+@jm4AFn6ZVhU ziVfRA4v%|ePE1K@MP!VU%QLxyHTVKtlNv=oOA^Zs-(5p{H2ZP2hrA7(Pc+)rsURt< zp`XiJ^T21pcSb+RkjL~&(YCDeDs2wSfJ%AB<#IB`YgU^QKl9ojQ)E8T{{CvA21Dt_ z*QFpscT?>0Msuth{7x=v^`hI1-{G)3DW=<&uHwHC6!o$HBO1O+Nmw<8Dptq4Ais?7^=w(#xNu+OGC3KE;ZK zlbB<7!e0wK))#y`rl9kYU>~{=McRM-GbS^;Q(4C68ilge#}s;Hj-PH_t-4E0dB5&{ zLm*Vp$a+nk9B*vpMt_Zo$h*AX zb6W|O^F;X!7ALikYVl$nhN{*ih>q-J7Mg8)1iP=|n3Q4Mxbv*jbDOVU5WMCKG)#o! 
zQx39cCZR!MC+EvLAB0WA(%M(=rJ5YO&tOD&-1)$Pb|q*(1)YTI?8Mu=9fs=AuIqM` z#u9$8v{qQDcX(i@2!P^r*{Q!MFAl*|(vi8{4X>{zDMf9v zpm9^xU;T=9L-P5DOkq7^ed_F*x-}TXj$b;SoHPp`|1{(^l`O)hVxjqQ*17Fv-*w#| zA8LLmJHVE9WYKU*_88e#rv2br(2l6`5}8^9l#qQWx4@KF&zIA(89;1$|3Kw`jRc zs4oz^))?j0Xhp~t9=6LBaVr7}R9wX1x)_;ZCJmg%08Z~s;N zA+GNuEBtx;q(jg}iM1HCZcBe`l}Xuz?|Y;@%=egMaqDSiUKR{`wt;^3X~Y0ItIq9U z+ak)T($HB~{-DD&s!D~tv(>yr&HD=JFOz!j`;0u6sh#TI6SLUVOz3Rmv*?9l1F_wW zo632%&9k&?DxVQxZHZnZHkR;461GLO(m;f2k>so0+0$GL`^}t3N%@-BC|LX-kK1M6 z#P8W^opWC}eujFR5Dg~nowQkQKT6C+SJf!E&E7rxf+1X_P}H?0mZBo=fBoY_VZBN} z^+0V2^X>;*kAhU*b+Yd}TdR6w_Zr@q%=~e7Q}BiaohGfwlH$0xOy37**-$CuakEq- zeGXagg-ssp%dQyvE)pkArUxm;5-)U2e_zabLTVabejML7ZUlYH0KPNB{mFHAL1sw3 z#aByPFyAS?Kg_Pp&u;BN?6hk?>vo-kF0|Pst9p|sDNv<2kh5!tA3R;LA6|XZfAwC3 z#F-sszXx{Jbv*lZL8Y#YJ{_NXP<5fmE?O{hUzh~T^AXaEBAaylKfS1Ilzc{RxJHqQ zcd}k~)3b5&wpL}_oWVx*bDE;*hOr;ipUW=&rp+?8uhHgILd$V$CF3*UE;kG~J)A~6 z*G#0VfhL5@R*NCc5M@ef>l-iaRc8u1rgJW*h(s3g?hi8J_}JM4XYS(5XTGOjdG+f6HvA%AagDRc z2zPtFvAk}@owdXeZh63vh0P~^!8b#w^+!@k%M0c$2YK+!7@_YK7tNVZ5OQ91jODrpwIC}DOkgq@!uGr1HhgZzk7X0U@r zB|nqCn;h#GlF0^Y%(f+k+_dtd0@4p^`6pEQNx#3BJgCw>OeGDgN9=tkBE-*d27chu z1ZWDH$K{xux8z@dodkPYIUD8v0?RFxdmlwK7;-WCTT%u)@J;Q4Rh$B^vs=jFX?NjaXR0&L_mF_EhG9F~Dd{vy8e`ow+ z=R@~f(AB31%-tSsO@*D^6|PKz(}wKdzaC7*X z%AFqfpjoEo2>~lZHzW{80;AHWCP@9WQ5DI`d&Zv+j0*-o7ttOW1Sd_Ms$ly+-Oz4~ z7=v|a;+1!5xg;N4rY|Ai1G-lDbHPFTq$sXl_a*3vMh@%5t(SR}o(&I+5|=dyus{1} z+koB<{iq7@)F5vBAlFO@Ew88O<{W7`RGjw$yhJ`icPul|THybDV*vpT)x>ahh<7zfpf9a50VhTf zS*8!dB3D!z!g<{VsuM_DJK6s(PoCif&mqcX2|Y(7!V3*!4GS9W#)#Lx-SWsF;?;ak zWj2{|twI}tJP>_(auGK5l#3c1-jN8BQrIAAl$emobj=)`5j`PlK`=6!a%s-n$s*9_ z1EY8Y3u=n{hM2`}ir@;wp+s3ik3t!b*Ved{I`1rp$Pf|01vY{ALBmQDqy0J4EuU50 znxf7B3|0CCtm0;Ca21l3d8Y_d^$vQQYyb5*#0hn9Qu*T}>E+MYg-rknwC|Q?M)){I z)oBvr0=_HICo2RY#sK6Y1s;28BEaB1C6BtfZp^98Y*dEQhtM3&idRXj-VX0!4eNu2 z__*$^sM5#%L^SWZz!hm@XuLkl$_6B)beHYh8!;vTPhvErrTzn+*vNX2o7pafc4@;Z z#O8?@1#mKfQ+fNfTp1f$Wd3(z9Lhd6=96n>^t^lS&S&!-Xc>&_D`eSJu^w8qEFHXU 
z%drGI3hLn22fHbCGB?zVSq89kX7ifJ8i6oESIU3?#Xr7I(u3T^181@nxr%(^Uj|`tO>2Kn9^4HFrZm@Ie@?>v`Q^T`Fi?Et zTGgX(LqB?2XJ+m;hBam=92zvrt&-gUwdZB{HzJB5regy)M`$k$vp>G#bt+u`n^d9x z_SYU_1BktTE#LqTl%)mlT#^TtMV4koVGSJE@AR6)1d4PKaGDSGA}pa;K^OZjo#1Oe za)4$&FMECNKM3PgAll7=>2L8V@qgci8TxVhd4b_*PTgMy<1|Iw@TzgbUED!}mfr!Ny2j~lhU z?xDu=)FhL~)j-m%IgI3T?LF<88E%1P7~)Y2okK0iAdmIpKoSYTbh54ndhOD^io17_ zL})lQm2p9k&gvVhD;Qr`Iw5je1Hd@M1?7vQG8@?im{a*9()*r&JvjPsTc~h%9KIX> z6L3r>%>bjZH>^r8FTcI(5G-ZQrDFQP1Y^Z2`z{xq=r@s3UVw)BXlk+sn+S`190&Xg5fHY8%WCU&~LybE0 z0UTVz?ht7Lw4b`m)|`Hyi8G)FX}37gRZL8fW{kzjYXENvtY|hG%<1ah}O~l_% zk}rc2w+$Z3%!`ltaE$Ytq1BcR$E;GX-L!nz#&Mb z);iBMc?8P{>+BLd1;qUVuf-#%<$vnRKmSC3$5U{7 z;1hhWFyfqV!)*0P41I|~tIR?RVCSceKo{~rR-q;IyoL+HhpXNQG=*RXz@?|=E|0E7 zC{xh6DUz2}D1U)8rIt;He)I^HJ4z`al+ z?G6Omd~kfI2N-)H)CFp#Ik-!(JfgmX=h=u$14-Whudyo+hkAYA5m`f)B(k(iqHJZC zT}s+ySGG|kl8B+PB&Q;6ipo+Ql&$PxED?(A4Z~QX>_#yr%kX<%#p!oWeXshXt7We7 zem?K}KFj^w&;6Xa!{w*2S||J*5EEAXyzc-1K$W&QKoCe!m2{SIsIIgPKI>`u4ub~2 z!1yu@%ujyF*4F9Ovl105Ekd%k7ul{;qvm$AZCv}PeKTQUiY+l0Hx0~C)yG|9jWj+6WK zuD8rj#A9h)eDdOnPlGhoOT0lE)fqfFqfhZnvBkbZ!0lh8K1RY!fgFmJFnS9EgHbdz z`eM943*;lohi|HwI?s+8Lc7cX!enOSu#M#ShH0DHJ+M?p?eL278eKJ6|NY#psa*cVu7G9;lSY z^eEi;{8N$m=eu0Ra(mm+x34ac&!}9S*qWzv)}En(w+{b_)r9ZwL#1LM@xqyFN`}%y z9tCvZ&JV&EsdV5v*3{?_@Wb1P8I~EJbi;%O_WJNi?o8{`c=u+0$a?-I8cQu|DMRG@ zAmL1yBX5Hl>j`YVaA$6Dr9uWjIt#X}6$&jvly4v~Q8NlxKPP4uxKO0?d}V0jVm6RU z_H@hbUWPMd#jN(dzE;i|1Q+io-+lE}3%1GVj4bIN(o3Z(og`yoX>!tVzCtGRqHWLv z)}5-Uc-T7!x*A1Qco34a14MGN=RVZR*BV~0B7B9J$3&piJ5JL)K4v@C)!JK*YRfhh!KXU-P9m2Y$I z=>YCAHE3l4)W1iP5+1qJ;bY{|upKiK13mcEi@x=8y4Tl!x?435L66}~>FH0aR<6I2 zeH&YPHKcc$ca?@bM1?H+X)kjG(fm>*`$!Wy$fb7_p62Mz@0h{&!zrpsZ}#3udmkOd zm5T<%{V;q?fh?HPVPONtCmn@t!4GdwFi!XvK;<^CHDhibJ3xrCScxdG`^x6w>PJ63 zi1+oD0L`vwQYw~^wS5+D&1pzsvmS>jzGdttSe(1AlmMZAw)-aoG?7YaE|}jc9Y#>* zZ|dc%K0QOvC#jI8c+{gHbyXTB{6fmwcrI(7!siy?w$G5gii`?MsgN2pXR_x?p+Ptc z!{tD}D*rxp?e19Y@c4_{y%~4HJ5V?S^Dt`Pgp0(4lM-s3P>{z`Iq;KXeuR;3Ay6T_|I3in>H_Ba6a;dapgsEPD` 
z;z?*`ljZRP_wU_0|8X>P#e;rFz2&;UE<=ey%*3v72pr1~vULDNOq;IMK9$*wsP1`B z8}EU;JG-baUBVmuw4q7JUe;oWnLt9MOrg6P2}dK98S^`;GGE}Jz)#?Nsfdf?dK z4V#vHHLe};`L54sNi0UBI4^iCWswtHbyn?2@Y|`?)!+>4R%sC#hihyJa{0<;^}9w{ z^W`M`-apvG7=lLS$ODCjzwY^Vi`+UFBN$=gt^`xDNZen|{+a_vB0Zv?N~`2E7=8I} z$LM`yFLGp}(USzzF;3;NOG;FfPr^VxGZX|V?e}SUE)ul|K&N+&66_=(#8~)1ng3OQ zz_-xU3Mz<}s|RXlW|bjz(gSi2q7z`L-WPiYOg=?{p`Y>Z$ zog_6<=VK6EQXuJ;L8(|eBtnP~NJ^$c8rq6R4o_m$z{R)UoQ&FJs3BTSPh`~yG|v1) z|4Tq{7et6LVtUV(LT7`6>OMRd2Aiq?moyJZ>1tr+wX%UY9gQ;Me}+gR!D&}yV9#+k zSTN-w5~;bRNXYC4<=QB32rSfX>$#OnhlGwHp1GgVT$1_?VxaD&C$hV9X_D>{6qMWL z#nZ2vWV(()5#pmTSgCa=va7ZQ&h729-p(Uigwn!PvD3NO>pgeiT9&V@SMun$ftPpH zTo=-ClHp0HDsGO&;^iKjsx|U#HmuD0+-Sw{)xf5!827h-#z$Iyq=~3@yJ?E&I56rO zp|xmgJ)XNc#275=wF`Gtf6CT^F75TAFOvNR-Mon$Butsa$#RJe7#yzfB-W;Zd)qf*vd|U=F_@M5i@rpOU=-EYudfLT@Iq$5| zQo5NZ6*}tDLR>|r-t+6C_#^|`x6^-;a;<8kRYa1*+KO$VeldduTJy&o*4te_781~A ziH*|JRDh+)H!o8j5N|=V+2&E?<5^2ST~hlDcVJe!zwh8{i{tJpk74 zW7B7|#cKuDo`GQ^;M!i7!f^f@gTYQU8?Ty7l%6f%Pp4%yBx%N}#|zk$*GJkZH+}F5 z|?3ex-BnP}J)(NA@PpU3-B=eeesyY#)I*4!(v8BCB<{DeOS7p`r&(eKLS zT_(8Z={QgD`%{_)_r0o`N~+!Flyzcz8R4MXBX7SB^i~lYDAG4@ZykNMFk8v~>e3zk zltA#^t!1G+rruB2jD}lp>V?UT+2G0eUi0N?wLqHx+EaIcbIC9*ddKg+1HwIhR!ngX z*ro4>Bd2H8>5pQmx0OIt96R%N)0btFo&Nry!Wse9*B#5{4h3852gTlY zq{FrI)zy9h9Z-A6hv)CM(O>!<$occi#a11Ma%pq2X(J*h`9Lp$Jd5j6oJ> z27&PiGU;HtVn1{i@At*%}q$f+`M zkiQgEL~jWsMr`}KAFM~`=x&k8O%`-yJp(`}(JOMPN*vfCp-eTcH@+{RL zEeR8a2eJqIf%3nZ`3b7m?Go6sw7)-Hl<)t>#eqW}9i?~y-=NeL^!V_m_N&m_VGb~+ zr33($-4+yYh;Lc(!p zP0weVWLxiR!YV|E_YVeA(>%RScZxo2gGvY032+2ta?CU3ybLoHJpu-=5I9KuD^GqQ zggS@dg?(2ycyB+Sg3d8;$diG3FmR+iPoLEnw)I1~i~9CK)hovkny5cLg^^JOy{cSmKeX$QyRr zoc_-7^9*efp?Iz58}560HG;j%)!>K-N$^NDw+Z4rc$>E2%Go_drNWP_GWCoQcb~PGGZisnW#4cf*bQT&xzjfIJ6wm#Az( z*o*X^?JCTnX$jr(2!jTiN89hyH!5cO$a;h?>QJdgv7A5m{sG$?~4a``#qRBl8_JxDsBxa4yGbGp>O z(xdT|XmsxaA_4>JZ>mLXc+9#r$OMyc_Ky8)Z@pb&T_(B*)kR#M;|whV6|;GfZZm1S z7fo3Wh~8YkLRObH_s53rW{_^WV)>{4m>^U@s|}^iW93LP_^HK6${Xib2?Tr 
z;Egz&Aqo$7{5rKoUn@#C@{W}4V$NTRCgH{|JdrcE7pV9TBcit0EUy6_m%n~6hVs(CNC}ADIZn9se`ieK>pj9z%jCT9*ffs-<3i2=-GUYji+Xr|JC_VTU)K6xhZ(`x=w36J=NuezrKE@gE8yL#$yD-ZXdJt1tH zMez|pt$YBJf4y{e$I5t&a+@_|dGNpr`Qs8*_Lgn8z9TyyswAC>$#8tOnWcLsnA zspgwQoc)J~qX2L)1nsx7Ycw{WtI2;o-H(-)fsaf~Pjmj{o+A-9_H zA`Hmxd?-VKYuB8h#)2O zFrFWdn0gdFlwdR~D77R`NkKkxOJrL<1AOtOV>_^Q(I)`%> zB05WEe*lU#1JvKb(}~}5qyPQGb#pBoB50k0!lnw6%=~97V_V74L+}FXW99%k{P{(K zu*Zed}nhRxxVPx%Oy3tS-LoPj2xRds~4*VzRTe?o7Li9u?Ke*h-pxG@) zL3z`89IA&M!nWnQJq83qY+&3Y`7PYmoZVZFsZ38pr-D65GDz&!B~V-_BD#Jlz%hVg zY*cPvB8Ptmq@*_BVYhx${#ICd+NFp#nSBbHbT0H#0%L}3J2|jIQ!4Xv^L}8*vjKwX zSl8Ft%jew=$@Ljj@-OWHSojnGizzZD>q@94;jm}A=AhBs9-`v{MZOd5wg7cFf>X|h z2q&*+)@R-g%B{{I#~+QA@=7L(nP4S}(fG7!igN${3WiQ-4{AlFAN)d-k5FHD8WeS& zs}ZT_#5%@~&*sh9QiebY*uxt2NXYwGXFHoxg|ueSw2wYw;<@#m(3WO?;Q)Z_Y#kog zR0-4o&!lcKoBgjP5=y&{%pDZEn1~VqGoI5A8nyRaG4b%975(dY#Z*klaa<8GWiCle_;GDBhvf$M zIvj8rIXSwn2rY}N^8u~tXxc0z)7^fnvS~jw^8;S!=yXYpDK?p_!{3NNAk6xxM6=EX zT90=k$3PNg#tKh)LX9+EU~sL)$4+?nA8NlM&)Z6tZgmn3B6bH(cg;ZQDvwTR&~|J; z0Z1poXmYn_od#2IuY&3?v(W`h{LgQ85ycR~VUV9iaJKB@)=10=t6p|TwP^E_XY`7} zHYGQd%&v?EDvGR`R=1A3S6Tq_AK0q`84)s=$16DwUg>viLXg!CugMsu+Hm=dW0a}; zSrN;*c-KEmnHwtU@wF^zEm9Q?pEVmBw^=46a9MXn{TDnr?Wxb_q3U>D3D;=uJv^P7 z-(q6CWGIhm?E|yNyIhsY88 zl9`qhF(MVyQ9y&;&b}i3#odkJv#-ur^%B;~Vr+nh)4iH1N968&ahDc0`)aW16y5#p zOu}BVHJ}E5R6uefOx|3XNSK|P!-C5lzGb8us@SGnpXECruzY<%@Lgwg7fS`c)sI?c z&h&;i7W1hXmDsKiH=6V*xw9=(x$y1?r(X?$Ck09KhC#Wd zs&~WZK}C=@8HRZ@HC}~B?uj5jG!6ZxfiDnd256su@<0ECxImMZ)gd-PY{ zVyh1L%9CZE5B)wgUbR*?>TIaunVN zYy#GwpTT-ZgFxHbHilG3#PvYHs7yh7QQRYfD6s)iB#?a+i4-0k96|BL$pIl-wmaC_ zc%1c}5k-dK`ZfL@xq7{PSsbE=Bzwwgx?v9~3GEyM!q%T`1H-bBIEO6>hn<-6YQ|=lv-Wfa-{soc6A1G9gc-)n)Kzl`=ZdbmH{pMz6 z;K&{*bIMeZrYVlkrhISDE`bw&q*cAfg5!VBJTgDg<*817R`Ws<=Mq`c0BP7IS~sz@ zMCRmS?oCzQdqDPZcZV-v_%41|sv#N{@gAx)EuAO9Q9gTx4P-(cAJO<625f7KVAkyN zbV?>jd>JM{NHYl;*habB@V&4jYQ`g;1=E(N9mgTY8e4L3%tH}l<3Sk(7pwjsK|KYDT7 z@YN%6Za)QJJu@jU%mw{0;z~B99t(Z|Cn5xCbH8goG^ho+?Z;W8I=<-l^b?Wm!bR+R 
zs4ad}3=xe9XjPSdhwxY6lKP_s|fXiy)^X?|c@ZtK>qh7n&-PxrA3)XV$CMTMq9M2I2sT}?V_WB9!E(xCX&V?(8_QIX z{g!fG`=`zO?nI)+V9>%Oj}SJJygGAGU0e-Odv5L5bNzqr))6ed3pi2@-SB^}XgV$U0m^YW${xbC{4%hg9 z3<|#i4gxKv;5EOYx)2EW4lBk!kL>tvg*8#9-K1Ww9BYsPiqQ z_#YlzcS%j2)W`BOZo{0%^#dwD4LiI2&u?%~Y^w()SC}-~uOAajViV z?&+>hPQ3jLKo)k-*Mtdc=b)m*1zs&d=XOiW2XDF$a`y>+uzzBrk2DptksHrc7^PJK zIbxZWX!bue#33|fqW!1Mp1yIUde@|+4}v2;v7(q5Oi0ekWvy_wn1w?De*zE>3vRl^ zI#G%ob~DBmFM5Imn~=Jn;MYek=GO|j}jlSC6Ok!&<6$v?osi0ya44}UWwWt zW3l;7w0mimFjdiVrG`J!lEA5g(p3mY099y~kF-CdeEbiJ-_L4(fcyY48k zv7Ga=q+R^5DzOT^o3y$NrBlohq0%;IXPaMVt1m4s{s7arPDxp>4l9Y9gm7DB<~ZwX zrPTO@1kPO={XqANwu_3wFcEvH15@c)eL0R1ockAG=$=12Ha6x=e_fiAe9^??YRW-3 zI!zWzZq=jn*4l!4ODs5baX=ZJAayDMcd^kyxaWtv=205$vC)!* z7BdP83%d@xnqdcvWT8}w5?m0YcOC;LK1!z)f2^ekffA)0Y%D-${S}w%_4P~1j1J9& z=^9=|X|jHosVpGvrOy_ZmrIP_jioA%)Cd8jZf2Oz-Cr{@kXI5|5XBPg|Zbl_r6rlzJw4akW^q6p#iC?}=V)!x2C1x2M& z7qY5amhljoGhCo6b!zmk59yb|3^$O?Lm8cyF!1Yoem!mQoFlMP)S9S?PUwb4NEBr= zewNi=OW, e.g. - 'localhost:8000'. - - verbose : bool - If True generate verbose output. Default value is False. - concurrency : int - The number of connections to create for this client. - Default value is 1. - connection_timeout : float - The timeout value for the connection. Default value - is 60.0 sec. - network_timeout : float - The timeout value for the network. Default value is - 60.0 sec - max_greenlets : int - Determines the maximum allowed number of worker greenlets - for handling asynchronous inference requests. Default value - is None, which means there will be no restriction on the - number of greenlets created. - ssl : bool - If True, channels the requests to encrypted https scheme. - Some improper settings may cause connection to prematurely - terminate with an unsuccessful handshake. See - `ssl_context_factory` option for using secure default - settings. Default value for this option is False. - ssl_options : dict - Any options supported by `ssl.wrap_socket` specified as - dictionary. 
The argument is ignored if 'ssl' is specified - False. - ssl_context_factory : SSLContext callable - It must be a callbable that returns a SSLContext. Set to - `gevent.ssl.create_default_context` to use contexts with - secure default settings. This should most likely resolve - connection issues in a secure way. The default value for - this option is None which directly wraps the socket with - the options provided via `ssl_options`. The argument is - ignored if 'ssl' is specified False. - insecure : bool - If True, then does not match the host name with the certificate. - Default value is False. The argument is ignored if 'ssl' is - specified False. - - Raises - ------ - Exception - If unable to create a client. - - """ - - def __init__(self, - url, - verbose=False, - concurrency=1, - connection_timeout=60.0, - network_timeout=60.0, - max_greenlets=None, - ssl=False, - ssl_options=None, - ssl_context_factory=None, - insecure=False): - if url.startswith("http://") or url.startswith("https://"): - raise_error("url should not include the scheme") - scheme = "https://" if ssl else "http://" - self._parsed_url = URL(scheme + url) - self._base_uri = self._parsed_url.request_uri.rstrip('/') - self._client_stub = HTTPClient.from_url( - self._parsed_url, - concurrency=concurrency, - connection_timeout=connection_timeout, - network_timeout=network_timeout, - ssl_options=ssl_options, - ssl_context_factory=ssl_context_factory, - insecure=insecure) - self._pool = gevent.pool.Pool(max_greenlets) - self._verbose = verbose - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def __del__(self): - self.close() - - def close(self): - """Close the client. Any future calls to server - will result in an Error. 
- - """ - self._pool.join() - self._client_stub.close() - - def _get(self, request_uri, headers, query_params): - """Issues the GET request to the server - - Parameters - ---------- - request_uri: str - The request URI to be used in GET request. - headers: dict - Additional HTTP headers to include in the request. - query_params: dict - Optional url query parameters to use in network - transaction. - - Returns - ------- - geventhttpclient.response.HTTPSocketPoolResponse - The response from server. - """ - if self._base_uri is not None: - request_uri = self._base_uri + "/" + request_uri - - if query_params is not None: - request_uri = request_uri + "?" + _get_query_string(query_params) - - if self._verbose: - print("GET {}, headers {}".format(request_uri, headers)) - - if headers is not None: - response = self._client_stub.get(request_uri, headers=headers) - else: - response = self._client_stub.get(request_uri) - - if self._verbose: - print(response) - - return response - - def _post(self, request_uri, request_body, headers, query_params): - """Issues the POST request to the server - - Parameters - ---------- - request_uri: str - The request URI to be used in POST request. - request_body: str - The body of the request - headers: dict - Additional HTTP headers to include in the request. - query_params: dict - Optional url query parameters to use in network - transaction. - - Returns - ------- - geventhttpclient.response.HTTPSocketPoolResponse - The response from server. - """ - if self._base_uri is not None: - request_uri = self._base_uri + "/" + request_uri - - if query_params is not None: - request_uri = request_uri + "?" 
+ _get_query_string(query_params) - - if self._verbose: - print("POST {}, headers {}\n{}".format(request_uri, headers, - request_body)) - - if headers is not None: - response = self._client_stub.post(request_uri=request_uri, - body=request_body, - headers=headers) - else: - response = self._client_stub.post(request_uri=request_uri, - body=request_body) - - if self._verbose: - print(response) - - return response - - def is_server_live(self, headers=None, query_params=None): - """Contact the inference server and get liveness. - - Parameters - ---------- - headers: dict - Optional dictionary specifying additional HTTP - headers to include in the request. - query_params: dict - Optional url query parameters to use in network - transaction. - - Returns - ------- - bool - True if server is live, False if server is not live. - - Raises - ------ - Exception - If unable to get liveness. - - """ - - request_uri = "v2/health/live" - response = self._get(request_uri=request_uri, - headers=headers, - query_params=query_params) - - return response.status_code == 200 - - def is_server_ready(self, headers=None, query_params=None): - """Contact the inference server and get readiness. - - Parameters - ---------- - headers: dict - Optional dictionary specifying additional HTTP - headers to include in the request. - query_params: dict - Optional url query parameters to use in network - transaction. - - Returns - ------- - bool - True if server is ready, False if server is not ready. - - Raises - ------ - Exception - If unable to get readiness. - - """ - request_uri = "v2/health/ready" - response = self._get(request_uri=request_uri, - headers=headers, - query_params=query_params) - - return response.status_code == 200 - - def is_model_ready(self, - model_name, - model_version="", - headers=None, - query_params=None): - """Contact the inference server and get the readiness of specified model. - - Parameters - ---------- - model_name: str - The name of the model to check for readiness. 
- model_version: str - The version of the model to check for readiness. The default value - is an empty string which means then the server will choose a version - based on the model and internal policy. - headers: dict - Optional dictionary specifying additional HTTP - headers to include in the request. - query_params: dict - Optional url query parameters to use in network - transaction. - - Returns - ------- - bool - True if the model is ready, False if not ready. - - Raises - ------ - Exception - If unable to get model readiness. - - """ - if type(model_version) != str: - raise_error("model version must be a string") - if model_version != "": - request_uri = "v2/models/{}/versions/{}/ready".format( - quote(model_name), model_version) - else: - request_uri = "v2/models/{}/ready".format(quote(model_name)) - - response = self._get(request_uri=request_uri, - headers=headers, - query_params=query_params) - - return response.status_code == 200 - - def get_server_metadata(self, headers=None, query_params=None): - """Contact the inference server and get its metadata. - - Parameters - ---------- - headers: dict - Optional dictionary specifying additional HTTP - headers to include in the request. - query_params: dict - Optional url query parameters to use in network - transaction. - - Returns - ------- - dict - The JSON dict holding the metadata. - - Raises - ------ - InferenceServerException - If unable to get server metadata. - - """ - request_uri = "v2" - response = self._get(request_uri=request_uri, - headers=headers, - query_params=query_params) - _raise_if_error(response) - - content = response.read() - if self._verbose: - print(content) - - return json.loads(content) - - def get_model_metadata(self, - model_name, - model_version="", - headers=None, - query_params=None): - """Contact the inference server and get the metadata for specified model. 
- - Parameters - ---------- - model_name: str - The name of the model - model_version: str - The version of the model to get metadata. The default value - is an empty string which means then the server will choose - a version based on the model and internal policy. - headers: dict - Optional dictionary specifying additional - HTTP headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction - - Returns - ------- - dict - The JSON dict holding the metadata. - - Raises - ------ - InferenceServerException - If unable to get model metadata. - - """ - if type(model_version) != str: - raise_error("model version must be a string") - if model_version != "": - request_uri = "v2/models/{}/versions/{}".format( - quote(model_name), model_version) - else: - request_uri = "v2/models/{}".format(quote(model_name)) - - response = self._get(request_uri=request_uri, - headers=headers, - query_params=query_params) - _raise_if_error(response) - - content = response.read() - if self._verbose: - print(content) - - return json.loads(content) - - def get_model_config(self, - model_name, - model_version="", - headers=None, - query_params=None): - """Contact the inference server and get the configuration for specified model. - - Parameters - ---------- - model_name: str - The name of the model - model_version: str - The version of the model to get configuration. The default value - is an empty string which means then the server will choose - a version based on the model and internal policy. - headers: dict - Optional dictionary specifying additional - HTTP headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction - - Returns - ------- - dict - The JSON dict holding the model config. - - Raises - ------ - InferenceServerException - If unable to get model configuration. 
- - """ - if model_version != "": - request_uri = "v2/models/{}/versions/{}/config".format( - quote(model_name), model_version) - else: - request_uri = "v2/models/{}/config".format(quote(model_name)) - - response = self._get(request_uri=request_uri, - headers=headers, - query_params=query_params) - _raise_if_error(response) - - content = response.read() - if self._verbose: - print(content) - - return json.loads(content) - - def get_model_repository_index(self, headers=None, query_params=None): - """Get the index of model repository contents - - Parameters - ---------- - headers: dict - Optional dictionary specifying additional - HTTP headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction - - Returns - ------- - dict - The JSON dict holding the model repository index. - - Raises - ------ - InferenceServerException - If unable to get the repository index. - - """ - request_uri = "v2/repository/index" - response = self._post(request_uri=request_uri, - request_body="", - headers=headers, - query_params=query_params) - _raise_if_error(response) - - content = response.read() - if self._verbose: - print(content) - - return json.loads(content) - - def load_model(self, model_name, headers=None, query_params=None): - """Request the inference server to load or reload specified model. - - Parameters - ---------- - model_name : str - The name of the model to be loaded. - headers: dict - Optional dictionary specifying additional - HTTP headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction - - Raises - ------ - InferenceServerException - If unable to load the model. 
- - """ - request_uri = "v2/repository/models/{}/load".format(quote(model_name)) - response = self._post(request_uri=request_uri, - request_body="", - headers=headers, - query_params=query_params) - _raise_if_error(response) - if self._verbose: - print("Loaded model '{}'".format(model_name)) - - def unload_model(self, - model_name, - headers=None, - query_params=None, - unload_dependents=False): - """Request the inference server to unload specified model. - - Parameters - ---------- - model_name : str - The name of the model to be unloaded. - headers: dict - Optional dictionary specifying additional - HTTP headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction - unload_dependents : bool - Whether the dependents of the model should also be unloaded. - - Raises - ------ - InferenceServerException - If unable to unload the model. - - """ - request_uri = "v2/repository/models/{}/unload".format(quote(model_name)) - unload_request = { - "parameters": { - "unload_dependents": unload_dependents - } - } - response = self._post(request_uri=request_uri, - request_body=json.dumps(unload_request), - headers=headers, - query_params=query_params) - _raise_if_error(response) - if self._verbose: - print("Loaded model '{}'".format(model_name)) - - def get_inference_statistics(self, - model_name="", - model_version="", - headers=None, - query_params=None): - """Get the inference statistics for the specified model name and - version. - - Parameters - ---------- - model_name : str - The name of the model to get statistics. The default value is - an empty string, which means statistics of all models will - be returned. - model_version: str - The version of the model to get inference statistics. The - default value is an empty string which means then the server - will return the statistics of all available model versions. - headers: dict - Optional dictionary specifying additional HTTP - headers to include in the request. 
- query_params: dict - Optional url query parameters to use in network - transaction - - Returns - ------- - dict - The JSON dict holding the model inference statistics. - - Raises - ------ - InferenceServerException - If unable to get the model inference statistics. - - """ - - if model_name != "": - if type(model_version) != str: - raise_error("model version must be a string") - if model_version != "": - request_uri = "v2/models/{}/versions/{}/stats".format( - quote(model_name), model_version) - else: - request_uri = "v2/models/{}/stats".format(quote(model_name)) - else: - request_uri = "v2/models/stats" - - response = self._get(request_uri=request_uri, - headers=headers, - query_params=query_params) - _raise_if_error(response) - - content = response.read() - if self._verbose: - print(content) - - return json.loads(content) - - def get_system_shared_memory_status(self, - region_name="", - headers=None, - query_params=None): - """Request system shared memory status from the server. - - Parameters - ---------- - region_name : str - The name of the region to query status. The default - value is an empty string, which means that the status - of all active system shared memory will be returned. - headers: dict - Optional dictionary specifying additional HTTP - headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction - - Returns - ------- - dict - The JSON dict holding system shared memory status. - - Raises - ------ - InferenceServerException - If unable to get the status of specified shared memory. 
- - """ - if region_name != "": - request_uri = "v2/systemsharedmemory/region/{}/status".format( - quote(region_name)) - else: - request_uri = "v2/systemsharedmemory/status" - - response = self._get(request_uri=request_uri, - headers=headers, - query_params=query_params) - _raise_if_error(response) - - content = response.read() - if self._verbose: - print(content) - - return json.loads(content) - - def register_system_shared_memory(self, - name, - key, - byte_size, - offset=0, - headers=None, - query_params=None): - """Request the server to register a system shared memory with the - following specification. - - Parameters - ---------- - name : str - The name of the region to register. - key : str - The key of the underlying memory object that contains the - system shared memory region. - byte_size : int - The size of the system shared memory region, in bytes. - offset : int - Offset, in bytes, within the underlying memory object to - the start of the system shared memory region. The default - value is zero. - headers: dict - Optional dictionary specifying additional - HTTP headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction - - Raises - ------ - InferenceServerException - If unable to register the specified system shared memory. - - """ - request_uri = "v2/systemsharedmemory/region/{}/register".format( - quote(name)) - - register_request = { - 'key': key, - 'offset': offset, - 'byte_size': byte_size - } - request_body = json.dumps(register_request) - - response = self._post(request_uri=request_uri, - request_body=request_body, - headers=headers, - query_params=query_params) - _raise_if_error(response) - if self._verbose: - print("Registered system shared memory with name '{}'".format(name)) - - def unregister_system_shared_memory(self, - name="", - headers=None, - query_params=None): - """Request the server to unregister a system shared memory with the - specified name. 
- - Parameters - ---------- - name : str - The name of the region to unregister. The default value is empty - string which means all the system shared memory regions will be - unregistered. - headers: dict - Optional dictionary specifying additional - HTTP headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction - - Raises - ------ - InferenceServerException - If unable to unregister the specified system shared memory region. - - """ - if name != "": - request_uri = "v2/systemsharedmemory/region/{}/unregister".format( - quote(name)) - else: - request_uri = "v2/systemsharedmemory/unregister" - - response = self._post(request_uri=request_uri, - request_body="", - headers=headers, - query_params=query_params) - _raise_if_error(response) - if self._verbose: - if name != "": - print("Unregistered system shared memory with name '{}'".format( - name)) - else: - print("Unregistered all system shared memory regions") - - def get_cuda_shared_memory_status(self, - region_name="", - headers=None, - query_params=None): - """Request cuda shared memory status from the server. - - Parameters - ---------- - region_name : str - The name of the region to query status. The default - value is an empty string, which means that the status - of all active cuda shared memory will be returned. - headers: dict - Optional dictionary specifying additional - HTTP headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction - - Returns - ------- - dict - The JSON dict holding cuda shared memory status. - - Raises - ------ - InferenceServerException - If unable to get the status of specified shared memory. 
- - """ - if region_name != "": - request_uri = "v2/cudasharedmemory/region/{}/status".format( - quote(region_name)) - else: - request_uri = "v2/cudasharedmemory/status" - - response = self._get(request_uri=request_uri, - headers=headers, - query_params=query_params) - _raise_if_error(response) - - content = response.read() - if self._verbose: - print(content) - - return json.loads(content) - - def register_cuda_shared_memory(self, - name, - raw_handle, - device_id, - byte_size, - headers=None, - query_params=None): - """Request the server to register a system shared memory with the - following specification. - - Parameters - ---------- - name : str - The name of the region to register. - raw_handle : bytes - The raw serialized cudaIPC handle in base64 encoding. - device_id : int - The GPU device ID on which the cudaIPC handle was created. - byte_size : int - The size of the cuda shared memory region, in bytes. - headers: dict - Optional dictionary specifying additional - HTTP headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction - - Raises - ------ - InferenceServerException - If unable to register the specified cuda shared memory. - - """ - request_uri = "v2/cudasharedmemory/region/{}/register".format( - quote(name)) - - register_request = { - 'raw_handle': { - 'b64': raw_handle - }, - 'device_id': device_id, - 'byte_size': byte_size - } - request_body = json.dumps(register_request) - - response = self._post(request_uri=request_uri, - request_body=request_body, - headers=headers, - query_params=query_params) - _raise_if_error(response) - if self._verbose: - print("Registered cuda shared memory with name '{}'".format(name)) - - def unregister_cuda_shared_memory(self, - name="", - headers=None, - query_params=None): - """Request the server to unregister a cuda shared memory with the - specified name. - - Parameters - ---------- - name : str - The name of the region to unregister. 
The default value is empty - string which means all the cuda shared memory regions will be - unregistered. - headers: dict - Optional dictionary specifying additional - HTTP headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction - - Raises - ------ - InferenceServerException - If unable to unregister the specified cuda shared memory region. - - """ - if name != "": - request_uri = "v2/cudasharedmemory/region/{}/unregister".format( - quote(name)) - else: - request_uri = "v2/cudasharedmemory/unregister" - - response = self._post(request_uri=request_uri, - request_body="", - headers=headers, - query_params=query_params) - _raise_if_error(response) - if self._verbose: - if name != "": - print("Unregistered cuda shared memory with name '{}'".format( - name)) - else: - print("Unregistered all cuda shared memory regions") - - @staticmethod - def generate_request_body(inputs, - outputs=None, - request_id="", - sequence_id=0, - sequence_start=False, - sequence_end=False, - priority=0, - timeout=None): - """Generate a request body for inference using the supplied 'inputs' - requesting the outputs specified by 'outputs'. - - Parameters - ---------- - inputs : list - A list of InferInput objects, each describing data for a input - tensor required by the model. - outputs : list - A list of InferRequestedOutput objects, each describing how the output - data must be returned. If not specified all outputs produced - by the model will be returned using default settings. - request_id: str - Optional identifier for the request. If specified will be returned - in the response. Default value is an empty string which means no - request_id will be used. - sequence_id : int or str - The unique identifier for the sequence being represented by the - object. A value of 0 or "" means that the request does not - belong to a sequence. Default is 0. 
- sequence_start: bool - Indicates whether the request being added marks the start of the - sequence. Default value is False. This argument is ignored if - 'sequence_id' is 0. - sequence_end: bool - Indicates whether the request being added marks the end of the - sequence. Default value is False. This argument is ignored if - 'sequence_id' is 0. - priority : int - Indicates the priority of the request. Priority value zero - indicates that the default priority level should be used - (i.e. same behavior as not specifying the priority parameter). - Lower value priorities indicate higher priority levels. Thus - the highest priority level is indicated by setting the parameter - to 1, the next highest is 2, etc. If not provided, the server - will handle the request using default setting for the model. - timeout : int - The timeout value for the request, in microseconds. If the request - cannot be completed within the time the server can take a - model-specific action such as terminating the request. If not - provided, the server will handle the request using default setting - for the model. - - Returns - ------- - Bytes - The request body of the inference. - Int - The byte size of the inference request header in the request body. - Returns None if the whole request body constitutes the request header. - - - Raises - ------ - InferenceServerException - If server fails to perform inference. - """ - return _get_inference_request(inputs=inputs, - request_id=request_id, - outputs=outputs, - sequence_id=sequence_id, - sequence_start=sequence_start, - sequence_end=sequence_end, - priority=priority, - timeout=timeout) - - @staticmethod - def parse_response_body(response_body, - verbose=False, - header_length=None, - content_encoding=None): - """Generate a InferResult object from the given 'response_body' - - Parameters - ---------- - response_body : bytes - The inference response from the server - verbose : bool - If True generate verbose output. Default value is False. 
- header_length : int - The length of the inference header if the header does not occupy - the whole response body. Default value is None. - content_encoding : string - The encoding of the response body if it is compressed. - Default value is None. - - Returns - ------- - InferResult - The InferResult object generated from the response body - """ - return InferResult.from_response_body(response_body, verbose, - header_length, content_encoding) - - def infer(self, - model_name, - inputs, - model_version="", - outputs=None, - request_id="", - sequence_id=0, - sequence_start=False, - sequence_end=False, - priority=0, - timeout=None, - headers=None, - query_params=None, - request_compression_algorithm=None, - response_compression_algorithm=None): - """Run synchronous inference using the supplied 'inputs' requesting - the outputs specified by 'outputs'. - - Parameters - ---------- - model_name: str - The name of the model to run inference. - inputs : list - A list of InferInput objects, each describing data for a input - tensor required by the model. - model_version: str - The version of the model to run inference. The default value - is an empty string which means then the server will choose - a version based on the model and internal policy. - outputs : list - A list of InferRequestedOutput objects, each describing how the output - data must be returned. If not specified all outputs produced - by the model will be returned using default settings. - request_id: str - Optional identifier for the request. If specified will be returned - in the response. Default value is an empty string which means no - request_id will be used. - sequence_id : int - The unique identifier for the sequence being represented by the - object. Default value is 0 which means that the request does not - belong to a sequence. - sequence_start: bool - Indicates whether the request being added marks the start of the - sequence. Default value is False. 
This argument is ignored if - 'sequence_id' is 0. - sequence_end: bool - Indicates whether the request being added marks the end of the - sequence. Default value is False. This argument is ignored if - 'sequence_id' is 0. - priority : int - Indicates the priority of the request. Priority value zero - indicates that the default priority level should be used - (i.e. same behavior as not specifying the priority parameter). - Lower value priorities indicate higher priority levels. Thus - the highest priority level is indicated by setting the parameter - to 1, the next highest is 2, etc. If not provided, the server - will handle the request using default setting for the model. - timeout : int - The timeout value for the request, in microseconds. If the request - cannot be completed within the time the server can take a - model-specific action such as terminating the request. If not - provided, the server will handle the request using default setting - for the model. - headers: dict - Optional dictionary specifying additional HTTP - headers to include in the request. - query_params: dict - Optional url query parameters to use in network - transaction. - request_compression_algorithm : str - Optional HTTP compression algorithm to use for the request body on client side. - Currently supports "deflate", "gzip" and None. By default, no - compression is used. - response_compression_algorithm : str - Optional HTTP compression algorithm to request for the response body. - Note that the response may not be compressed if the server does not - support the specified algorithm. Currently supports "deflate", - "gzip" and None. By default, no compression is requested. - - Returns - ------- - InferResult - The object holding the result of the inference. - - Raises - ------ - InferenceServerException - If server fails to perform inference. 
- """ - - request_body, json_size = _get_inference_request( - inputs=inputs, - request_id=request_id, - outputs=outputs, - sequence_id=sequence_id, - sequence_start=sequence_start, - sequence_end=sequence_end, - priority=priority, - timeout=timeout) - - if request_compression_algorithm == "gzip": - if headers is None: - headers = {} - headers["Content-Encoding"] = "gzip" - request_body = gzip.compress(request_body) - elif request_compression_algorithm == 'deflate': - if headers is None: - headers = {} - headers["Content-Encoding"] = "deflate" - # "Content-Encoding: deflate" actually means compressing in zlib structure - # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding - request_body = zlib.compress(request_body) - - if response_compression_algorithm == "gzip": - if headers is None: - headers = {} - headers["Accept-Encoding"] = "gzip" - elif response_compression_algorithm == 'deflate': - if headers is None: - headers = {} - headers["Accept-Encoding"] = "deflate" - - if json_size is not None: - if headers is None: - headers = {} - headers["Inference-Header-Content-Length"] = json_size - - if type(model_version) != str: - raise_error("model version must be a string") - if model_version != "": - request_uri = "v2/models/{}/versions/{}/infer".format( - quote(model_name), model_version) - else: - request_uri = "v2/models/{}/infer".format(quote(model_name)) - - response = self._post(request_uri=request_uri, - request_body=request_body, - headers=headers, - query_params=query_params) - _raise_if_error(response) - - return InferResult(response, self._verbose) - - def async_infer(self, - model_name, - inputs, - model_version="", - outputs=None, - request_id="", - sequence_id=0, - sequence_start=False, - sequence_end=False, - priority=0, - timeout=None, - headers=None, - query_params=None, - request_compression_algorithm=None, - response_compression_algorithm=None): - """Run asynchronous inference using the supplied 'inputs' requesting - the 
outputs specified by 'outputs'. Even though this call is - non-blocking, however, the actual number of concurrent requests to - the server will be limited by the 'concurrency' parameter specified - while creating this client. In other words, if the inflight - async_infer exceeds the specified 'concurrency', the delivery of - the exceeding request(s) to server will be blocked till the slot is - made available by retrieving the results of previously issued requests. - - Parameters - ---------- - model_name: str - The name of the model to run inference. - inputs : list - A list of InferInput objects, each describing data for a input - tensor required by the model. - model_version: str - The version of the model to run inference. The default value - is an empty string which means then the server will choose - a version based on the model and internal policy. - outputs : list - A list of InferRequestedOutput objects, each describing how the output - data must be returned. If not specified all outputs produced - by the model will be returned using default settings. - request_id: str - Optional identifier for the request. If specified will be returned - in the response. Default value is 'None' which means no request_id - will be used. - sequence_id : int - The unique identifier for the sequence being represented by the - object. Default value is 0 which means that the request does not - belong to a sequence. - sequence_start: bool - Indicates whether the request being added marks the start of the - sequence. Default value is False. This argument is ignored if - 'sequence_id' is 0. - sequence_end: bool - Indicates whether the request being added marks the end of the - sequence. Default value is False. This argument is ignored if - 'sequence_id' is 0. - priority : int - Indicates the priority of the request. Priority value zero - indicates that the default priority level should be used - (i.e. same behavior as not specifying the priority parameter). 
- Lower value priorities indicate higher priority levels. Thus - the highest priority level is indicated by setting the parameter - to 1, the next highest is 2, etc. If not provided, the server - will handle the request using default setting for the model. - timeout : int - The timeout value for the request, in microseconds. If the request - cannot be completed within the time the server can take a - model-specific action such as terminating the request. If not - provided, the server will handle the request using default setting - for the model. - headers: dict - Optional dictionary specifying additional HTTP - headers to include in the request - query_params: dict - Optional url query parameters to use in network - transaction. - request_compression_algorithm : str - Optional HTTP compression algorithm to use for the request body on client side. - Currently supports "deflate", "gzip" and None. By default, no - compression is used. - response_compression_algorithm : str - Optional HTTP compression algorithm to request for the response body. - Note that the response may not be compressed if the server does not - support the specified algorithm. Currently supports "deflate", - "gzip" and None. By default, no compression is requested. - - Returns - ------- - InferAsyncRequest object - The handle to the asynchronous inference request. - - Raises - ------ - InferenceServerException - If server fails to issue inference. 
- """ - - def wrapped_post(request_uri, request_body, headers, query_params): - return self._post(request_uri, request_body, headers, query_params) - - request_body, json_size = _get_inference_request( - inputs=inputs, - request_id=request_id, - outputs=outputs, - sequence_id=sequence_id, - sequence_start=sequence_start, - sequence_end=sequence_end, - priority=priority, - timeout=timeout) - - if request_compression_algorithm == "gzip": - if headers is None: - headers = {} - headers["Content-Encoding"] = "gzip" - request_body = gzip.compress(request_body) - elif request_compression_algorithm == 'deflate': - if headers is None: - headers = {} - headers["Content-Encoding"] = "deflate" - # "Content-Encoding: deflate" actually means compressing in zlib structure - # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding - request_body = zlib.compress(request_body) - - if response_compression_algorithm == "gzip": - if headers is None: - headers = {} - headers["Accept-Encoding"] = "gzip" - elif response_compression_algorithm == 'deflate': - if headers is None: - headers = {} - headers["Accept-Encoding"] = "deflate" - - if json_size is not None: - if headers is None: - headers = {} - headers["Inference-Header-Content-Length"] = json_size - - if type(model_version) != str: - raise_error("model version must be a string") - if model_version != "": - request_uri = "v2/models/{}/versions/{}/infer".format( - quote(model_name), model_version) - else: - request_uri = "v2/models/{}/infer".format(quote(model_name)) - - g = self._pool.apply_async( - wrapped_post, (request_uri, request_body, headers, query_params)) - - # Schedule the greenlet to run in this loop iteration - g.start() - - # Relinquish control to greenlet loop. Using non-zero - # value to ensure the control is transferred to the - # event loop. 
- gevent.sleep(0.01) - - if self._verbose: - verbose_message = "Sent request" - if request_id != "": - verbose_message = verbose_message + " '{}'".format(request_id) - print(verbose_message) - - return InferAsyncRequest(g, self._verbose) - - -class InferAsyncRequest: - """An object of InferAsyncRequest class is used to describe - a handle to an ongoing asynchronous inference request. - - Parameters - ---------- - greenlet : gevent.Greenlet - The greenlet object which will provide the results. - For further details about greenlets refer - http://www.gevent.org/api/gevent.greenlet.html. - - verbose : bool - If True generate verbose output. Default value is False. - """ - - def __init__(self, greenlet, verbose=False): - self._greenlet = greenlet - self._verbose = verbose - - def get_result(self, block=True, timeout=None): - """Get the results of the associated asynchronous inference. - Parameters - ---------- - block : bool - If block is True, the function will wait till the - corresponding response is received from the server. - Default value is True. - timeout : int - The maximum wait time for the function. This setting is - ignored if the block is set False. Default is None, - which means the function will block indefinitely till - the corresponding response is received. - - Returns - ------- - InferResult - The object holding the result of the async inference. - - Raises - ------ - InferenceServerException - If server fails to perform inference or failed to respond - within specified timeout. - """ - - try: - response = self._greenlet.get(block=block, timeout=timeout) - except gevent.Timeout as e: - raise_error("failed to obtain inference response") - - _raise_if_error(response) - return InferResult(response, self._verbose) - - -class InferInput: - """An object of InferInput class is used to describe - input tensor for an inference request. 
- - Parameters - ---------- - name : str - The name of input whose data will be described by this object - shape : list - The shape of the associated input. - datatype : str - The datatype of the associated input. - """ - - def __init__(self, name, shape, datatype): - self._name = name - self._shape = shape - self._datatype = datatype - self._parameters = {} - self._data = None - self._raw_data = None - - def name(self): - """Get the name of input associated with this object. - - Returns - ------- - str - The name of input - """ - return self._name - - def datatype(self): - """Get the datatype of input associated with this object. - - Returns - ------- - str - The datatype of input - """ - return self._datatype - - def shape(self): - """Get the shape of input associated with this object. - - Returns - ------- - list - The shape of input - """ - return self._shape - - def set_shape(self, shape): - """Set the shape of input. - - Parameters - ---------- - shape : list - The shape of the associated input. - """ - self._shape = shape - - def set_data_from_numpy(self, input_tensor, binary_data=True): - """Set the tensor data from the specified numpy array for - input associated with this object. - - Parameters - ---------- - input_tensor : numpy array - The tensor data in numpy array format - binary_data : bool - Indicates whether to set data for the input in binary format - or explicit tensor within JSON. The default value is True, - which means the data will be delivered as binary data in the - HTTP body after the JSON object. - - Raises - ------ - InferenceServerException - If failed to set data for the tensor. - """ - if not isinstance(input_tensor, (np.ndarray,)): - raise_error("input_tensor must be a numpy array") - dtype = np_to_triton_dtype(input_tensor.dtype) - if self._datatype != dtype: - raise_error( - "got unexpected datatype {} from numpy array, expected {}". 
- format(dtype, self._datatype)) - valid_shape = True - if len(self._shape) != len(input_tensor.shape): - valid_shape = False - else: - for i in range(len(self._shape)): - if self._shape[i] != input_tensor.shape[i]: - valid_shape = False - if not valid_shape: - raise_error( - "got unexpected numpy array shape [{}], expected [{}]".format( - str(input_tensor.shape)[1:-1], - str(self._shape)[1:-1])) - - self._parameters.pop('shared_memory_region', None) - self._parameters.pop('shared_memory_byte_size', None) - self._parameters.pop('shared_memory_offset', None) - - if not binary_data: - self._parameters.pop('binary_data_size', None) - self._raw_data = None - if self._datatype == "BYTES": - self._data = [] - try: - if input_tensor.size > 0: - for obj in np.nditer(input_tensor, - flags=["refs_ok"], - order='C'): - # We need to convert the object to string using utf-8, - # if we want to use the binary_data=False. JSON requires - # the input to be a UTF-8 string. - if input_tensor.dtype == np.object_: - if type(obj.item()) == bytes: - self._data.append( - str(obj.item(), encoding='utf-8')) - else: - self._data.append(str(obj.item())) - else: - self._data.append( - str(obj.item(), encoding='utf-8')) - except UnicodeDecodeError: - raise_error( - f'Failed to encode "{obj.item()}" using UTF-8. Please use binary_data=True, if' - ' you want to pass a byte array.') - else: - self._data = [val.item() for val in input_tensor.flatten()] - else: - self._data = None - if self._datatype == "BYTES": - serialized_output = serialize_byte_tensor(input_tensor) - if serialized_output.size > 0: - self._raw_data = serialized_output.item() - else: - self._raw_data = b'' - else: - self._raw_data = input_tensor.tobytes() - self._parameters['binary_data_size'] = len(self._raw_data) - - def set_shared_memory(self, region_name, byte_size, offset=0): - """Set the tensor data from the specified shared memory region. 
- - Parameters - ---------- - region_name : str - The name of the shared memory region holding tensor data. - byte_size : int - The size of the shared memory region holding tensor data. - offset : int - The offset, in bytes, into the region where the data for - the tensor starts. The default value is 0. - - """ - self._data = None - self._raw_data = None - self._parameters.pop('binary_data_size', None) - - self._parameters['shared_memory_region'] = region_name - self._parameters['shared_memory_byte_size'] = byte_size - if offset != 0: - self._parameters['shared_memory_offset'].int64_param = offset - - def _get_binary_data(self): - """Returns the raw binary data if available - - Returns - ------- - bytes - The raw data for the input tensor - """ - return self._raw_data - - def _get_tensor(self): - """Retrieve the underlying input as json dict. - - Returns - ------- - dict - The underlying tensor specification as dict - """ - tensor = { - 'name': self._name, - 'shape': self._shape, - 'datatype': self._datatype - } - if self._parameters: - tensor['parameters'] = self._parameters - - if self._parameters.get('shared_memory_region') is None and \ - self._raw_data is None: - if self._data is not None: - tensor['data'] = self._data - return tensor - - -class InferRequestedOutput: - """An object of InferRequestedOutput class is used to describe a - requested output tensor for an inference request. - - Parameters - ---------- - name : str - The name of output tensor to associate with this object. - binary_data : bool - Indicates whether to return result data for the output in - binary format or explicit tensor within JSON. The default - value is True, which means the data will be delivered as - binary data in the HTTP body after JSON object. This field - will be unset if shared memory is set for the output. - class_count : int - The number of classifications to be requested. The default - value is 0 which means the classification results are not - requested. 
- """ - - def __init__(self, name, binary_data=True, class_count=0): - self._name = name - self._parameters = {} - if class_count != 0: - self._parameters['classification'] = class_count - self._binary = binary_data - self._parameters['binary_data'] = binary_data - - def name(self): - """Get the name of output associated with this object. - - Returns - ------- - str - The name of output - """ - return self._name - - def set_shared_memory(self, region_name, byte_size, offset=0): - """Marks the output to return the inference result in - specified shared memory region. - - Parameters - ---------- - region_name : str - The name of the shared memory region to hold tensor data. - byte_size : int - The size of the shared memory region to hold tensor data. - offset : int - The offset, in bytes, into the region where the data for - the tensor starts. The default value is 0. - - """ - if 'classification' in self._parameters: - raise_error("shared memory can't be set on classification output") - if self._binary: - self._parameters['binary_data'] = False - - self._parameters['shared_memory_region'] = region_name - self._parameters['shared_memory_byte_size'] = byte_size - if offset != 0: - self._parameters['shared_memory_offset'] = offset - - def unset_shared_memory(self): - """Clears the shared memory option set by the last call to - InferRequestedOutput.set_shared_memory(). After call to this - function requested output will no longer be returned in a - shared memory region. - """ - - self._parameters['binary_data'] = self._binary - self._parameters.pop('shared_memory_region', None) - self._parameters.pop('shared_memory_byte_size', None) - self._parameters.pop('shared_memory_offset', None) - - def _get_tensor(self): - """Retrieve the underlying input as json dict. 
- - Returns - ------- - dict - The underlying tensor as a dict - """ - tensor = {'name': self._name} - if self._parameters: - tensor['parameters'] = self._parameters - return tensor - - -class InferResult: - """An object of InferResult class holds the response of - an inference request and provide methods to retrieve - inference results. - - Parameters - ---------- - response : geventhttpclient.response.HTTPSocketPoolResponse - The inference response from the server - verbose : bool - If True generate verbose output. Default value is False. - """ - - def __init__(self, response, verbose): - header_length = response.get('Inference-Header-Content-Length') - - # Internal class that simulate the interface of 'response' - class DecompressedResponse: - - def __init__(self, decompressed_data): - self.decompressed_data_ = decompressed_data - self.offset_ = 0 - - def read(self, length=-1): - if length == -1: - return self.decompressed_data_[self.offset_:] - else: - prev_offset = self.offset_ - self.offset_ += length - return self.decompressed_data_[prev_offset:self.offset_] - - content_encoding = response.get('Content-Encoding') - if content_encoding is not None: - if content_encoding == "gzip": - response = DecompressedResponse(gzip.decompress( - response.read())) - elif content_encoding == 'deflate': - response = DecompressedResponse(zlib.decompress( - response.read())) - if header_length is None: - content = response.read() - if verbose: - print(content) - try: - self._result = json.loads(content) - except UnicodeDecodeError as e: - raise_error( - f'Failed to encode using UTF-8. Please use binary_data=True, if' - f' you want to pass a byte array. 
UnicodeError: {e}') - else: - header_length = int(header_length) - content = response.read(length=header_length) - if verbose: - print(content) - self._result = json.loads(content) - - # Maps the output name to the index in buffer for quick retrieval - self._output_name_to_buffer_map = {} - # Read the remaining data off the response body. - self._buffer = response.read() - buffer_index = 0 - for output in self._result['outputs']: - parameters = output.get("parameters") - if parameters is not None: - this_data_size = parameters.get("binary_data_size") - if this_data_size is not None: - self._output_name_to_buffer_map[ - output['name']] = buffer_index - buffer_index = buffer_index + this_data_size - - @classmethod - def from_response_body(cls, - response_body, - verbose=False, - header_length=None, - content_encoding=None): - """A class method to construct InferResult object - from a given 'response_body'. - - Parameters - ---------- - response_body : bytes - The inference response from the server - verbose : bool - If True generate verbose output. Default value is False. - header_length : int - The length of the inference header if the header does not occupy - the whole response body. Default value is None. - content_encoding : string - The encoding of the response body if it is compressed. - Default value is None. 
- - Returns - ------- - InferResult - The InferResult object generated from the response body - """ - - # Internal class that simulate the interface of 'response' - class Response: - - def __init__(self, response_body, header_length, content_encoding): - self.response_body_ = response_body - self.offset_ = 0 - self.parameters_ = { - 'Inference-Header-Content-Length': header_length, - 'Content-Encoding': content_encoding - } - - def get(self, key): - return self.parameters_.get(key) - - def read(self, length=-1): - if length == -1: - return self.response_body_[self.offset_:] - else: - prev_offset = self.offset_ - self.offset_ += length - return self.response_body_[prev_offset:self.offset_] - - return cls(Response(response_body, header_length, content_encoding), - verbose) - - def as_numpy(self, name): - """Get the tensor data for output associated with this object - in numpy format - - Parameters - ---------- - name : str - The name of the output tensor whose result is to be retrieved. - - Returns - ------- - numpy array - The numpy array containing the response data for the tensor or - None if the data for specified tensor name is not found. - """ - if self._result.get('outputs') is not None: - for output in self._result['outputs']: - if output['name'] == name: - datatype = output['datatype'] - has_binary_data = False - parameters = output.get("parameters") - if parameters is not None: - this_data_size = parameters.get("binary_data_size") - if this_data_size is not None: - has_binary_data = True - if this_data_size != 0: - start_index = self._output_name_to_buffer_map[ - name] - end_index = start_index + this_data_size - if datatype == 'BYTES': - # String results contain a 4-byte string length - # followed by the actual string characters. Hence, - # need to decode the raw bytes to convert into - # array elements. 
- np_array = deserialize_bytes_tensor( - self._buffer[start_index:end_index]) - else: - np_array = np.frombuffer( - self._buffer[start_index:end_index], - dtype=triton_to_np_dtype(datatype)) - else: - np_array = np.empty(0) - if not has_binary_data: - np_array = np.array(output['data'], - dtype=triton_to_np_dtype(datatype)) - np_array = np_array.reshape(output['shape']) - return np_array - return None - - def get_output(self, name): - """Retrieves the output tensor corresponding to the named ouput. - - Parameters - ---------- - name : str - The name of the tensor for which Output is to be - retrieved. - - Returns - ------- - Dict - If an output tensor with specified name is present in - the infer resonse then returns it as a json dict, - otherwise returns None. - """ - for output in self._result['outputs']: - if output['name'] == name: - return output - - return None - - def get_response(self): - """Retrieves the complete response - - Returns - ------- - dict - The underlying response dict. 
- """ - return self._result \ No newline at end of file diff --git a/examples/clearml_serving_simple_http_inference_request/sample_image.webp b/examples/clearml_serving_simple_http_inference_request/sample_image.webp deleted file mode 100644 index 9258c91271eca741cd12b56ed7db4c9a8f2c790d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27042 zcmd2?Q*$Luw9Sca+qP}nwr$(CZ9AEmCz{yF#I~);B)MPRzi?mfQ+HMG>h6cNyLPX& zwyKPzWUdVeh?az?vZgY(eBys|ro}NAKvQv-135>B|I9Prm;+f&%ZXX#9*rZOOacQD z+n3`v7zpoKd3w5Pc8+Q+GU#2uYV6M-a;R$TTVeh$o+?$?yZ%48XZiWRu*o#WOtw2x z$m)JtsjlsR znP5`MWo%7nMAcaY(&qpx23x)J7Tp2(N#n}{Nz*5cL`G)zTTpLXJ0soG;~$QB>okDVsM0@8QM(&?hle4K^fmKIO zW-yB5HLQ1A1RdpO{Xqo&nro&tJqxx6M{Wisn}w%G%KCPe&81zanz_TGaN$Q>u0j5p zKOQfnQrw9B^8#3QO3wP)I>^PB9uaB5Vm8|)Y29%Gb$qo^7fqQlpmfD;3rocVUCm@D zD!Gp8rR7j4uIB-_n}?fpd9t#>Irr)A%xKhzC@+?fbg^_Kf5PSc1Cc@h(yIu|%2J`2 zq8}!kE^Pc6TEi@|An3ceRU6Mq?ZHsjpD5|up4r#ldf0>oPay(+#at#c70TDf)|)OY zER{cLyN6N*i+<`LoA^kqTbYTd$+-~?;;b*5e`m+bhnr9>O(`zVEvcV;@Uc?3;;N${ ztCQk90BECHUHRnPURcK;V+p{HMKq&%Y>s$KC5EdD0M%IBg>aUTksvhLupmpwOZtUY z#OLnq>{x|f^Ht9zI4i{G8Mx)82d!CIYl9(n z(p60!K;R^$a34^&ykwl(x5zUm?dz;crE+1l9|a0?$EE=*TlCn(MTL>YY>U7z&7IiVJM@(=a_=n<`sMLN| zMprH@@jgjlswA$q04?kXTzVIC>(yL+LIy=s)6^^$DLz`KmDwU-IYiLDxOBHsXK$sM z2jnS1jO#>wqnzPE$#ACOFJX>7i>VU3VeJ!j(*hZ$zX{WXHzQi?>v7poo#K6lP@ zL3p(G-u+U4q=4}*lUk8EM5`OY{|_7u6$jFsd4quj{7mH7i(dK?2t(J6S6>UOOUcUk zF>U-|cnKh=5E20NG-KM5-c(sBFwNO(swecK0(BdpFtJW#2F;>Hd-uGetS7VGblMj= zD2%d&YhCl>ue3HQx39~Rh6)0S#rj&nYK0BBEo_y0Lhl>Jt>)V&G*B!uh$L7RY|5B; zVUu=H`R5zIwRQXJLqt=`5)RJZ*>9Z0W$N6?x~QPs$|v#Ta4L2I254F#qWt`Ijf=OS zhD|%ago1<6RhXHhHWxtbH|-4@_4~swAcR6(^4Mc`uX3en`Xw=|4)}QkyP(!(zjw|l zInaR+r00R;BH8}{itmF`4=b|v?cRrw32I~uka`l8QOB3t&Vc>Ceaot|3h`>P)DvzDL2 zmJo5>wP#@EW{y?8nzFo0Q!cF101N9Ys6C^}qPgixA%S&CMOA}2vrnORGI!aTtyX;K z{yRtdQ|%mU?t|k8zjNMgz0XC%%((&nwdqL?JAPv2kAB(%eQNEYlvr|hqRRGtCkE?> zke%ZF6I_c^n)~cWs9xPjrj>AmTisG|sv1yO08#_~yR?K;GrOPF5xqIeKb+c7QKL}~ 
zh$fN$9{Mh8P_3Si;xo0`z}D2z>=)*M;i@mRQmdylOy!1yp#DSG!lE&D0@b?p0>Y#T_E`s%)4 zJQREd<@M=tUvp+xO7wB|bFSqjH>I#iT?$bX^s&FGT5uBp2Z}>3KIAF_K@)C83_?W-3-_S^9^MX`Nl!$@CZ!7!#ihafde3nWb5+4rf&N|mdlT!0bz5M= zs$t(~8w99yZjGAx#|SU@SG6ENA3;c!mwGSy+0apZIUY#B@4XJ(X-Pn;;)Ue(F$?uE zt1RsiQzNNfe7X0B^FKqQ7T1Z*`Q!H5t{FWR_a?H2!P~^2r?gvYnJJ zRAaN`&)kHImO*1cY6~x|rdIiY+h92RpPl`*;hIxWO^>00>9Xi6neU$6!6;)gOm$k5 z-Lq3MFzL!SQJ1ylNKGEg)t;ZL&XK8zC5aTqd87@X&JFT*YQjkkSIx$nY`vlA$6+qV zxQ-26U$1f{Cy??*Ta^Fm=#h@@meWuH8X4p+#6hzpHij34WCvU;cR;hwPF>J~RX_&5 zR)CV4AoJ1xq|~ln1M*Ka*~zsxtCY5AUQ0EYn1AxDZE0{FJNfIO=y z$?3UC9CmxTKLs%jxSr}O>A|h2tnuSp3>(WEV<*$+gv6}g8({f1S6&mmGhZ^hNTrh+ zi>Hy_f&A2H35_E$vsBl??slCzye6Ydw`wN)yV>*--n#MI1U`#sx=kMEkB{!ZDe+u(+^9(B`B>&G@bOS1a&?ZwP+`1S!r;G7>aVC}^WxcIMBq9#$qwsM6eD9Y6+C zT!#x&K3xk^_+dfw@LZ@4I#jxjNtbwwYa@S(-b7ZjiJKjRCX~74lVJG5n}NH+*2uczZ25KDGQkT3v61+NWrEh8sqDrI8nt zF0@kk0gl4!4Fm};1y(I%+87ZHDjL)t0G}@UP0zqiJ*k_>A-uZIQySO9W+`o5sj|aI zU2Y_{=mQ#$mGIrKPgBDdvQPMBp6ZHq(KI&UGXdA5KbSH<(*u+#R=A7?;%7s6E71F`kmcf@Lb61YH7vz?h=a2H1XJNp}#0D zSOZIw>i zO#q<@pQ49(Agka4^|dSMD*@WliIAUXnMld>P7Anfu$i|ynJm$HBXptp9l=WZ-p(Zp z9Z5zq@ochq({=n>aC>kzes+|V%JX8*Q*OIP(V5Nn=f!pWZ&%Q6zaM=M#FrwHd%mDz z)pAsgz&{z%sgtiR&^<&s=S6||%ll|IeANhv@k>7a6Bjssx~4$SpX#T9t%tWD4Nb!G zXBA=bK3}FcCO*vV!8e#~?fsgP@0=R|+AUbfAlh0x>Fn)7 z!}|he_s)IK_DfC4OWT=e1J6Z5S(_gZ)>rERxsnIp`kGAix$7kx}7H&2zzG}&u+yFOGibS>oqLPvhWNx_6 zY!c4;W>CK#w_}UyTX1&JF$9)l_G-^Byl1R`DdRH=7?@j4|5x%dWU)Ri2eS+qzOj?sR65yhu3ky%&EQ-bUb^%*b zy}BwrL{3SOu0g>^pv?dt=#0Ov6x@i95I+!X%MV(PfypL;r;P9g+4L<=hy8npZsg`iWEjoB02+5Bg`w^ytmtr{a#>Ok5#)3* z7On*n^yhdJcEzAv;vX2EhNlv`MoQC+0t;B31>};=0j1(RRR{=)+c*NMXsoOn-8NbU zEVK*Kmp%AXs)G>sG%ZdH0phs{^4W<6PZHlmDfcuTd$O13Pw`Xb9pi0_pj&WsY5)y% zHF#F}pL>nl%%kGsq`KgTCim2Ldi=?T6(OR7B9B$D!wqvAIx}NSW#dskq+P*YIEWCA zRTYbYeeRvS9$Djil9w__!Pq}TSK5F}4y)2krCw@G*5TBg(}tgbYu9znUNt0XSWcDu z_I<45VAJw%TJ6`1f__D)h)&k%h@1$sB7qRlkF&B;b?}l))fG0~6ImoCVq7+HLZx6^ z-S0d$XQgGZCXEIk;Z~dYOw#L0SAE+2w+}Y?dDiZ`4rP8PqD5=Xz)+jgl45dEIYomU 
zfu@pRdt7*rKPW8oVSBj%pHjkKMKcqE#4vT#@On?Jps#5k50GE&>?yiRo0*Y7+)5u5Yo4&giC&G;6FJ+3X5=Xh*y<<-fzTy4#8GF2n*89 zfj%8|J*b}NLl`|RQl$Bm{gOV9mJu`yk%6I37xl3Ji7I)jL^0zh&(+GXrsqsUpN(F; zqWGM9l#1sI_p8@c1-=m8F&0pIC@e&t673sZEzI3Wwsc_MzNIs770{Defo*Y`Nz6Gdm5)LSaGUEnl5rlxJ$t#g5l?yTkw;;wW_6sW05 zeiE7_xK_+DauF*K61+UFe?f3hLmQQQNu%RyMaLyJfnP2cZv+A%O6%8F;iivZfdz;27`0Nav`I0uo%?w}+@+v)i zIZ>ej0nXkCI?te!W59nH_{&YX$*@Mxt1PSOY}}->kZTeD8)n-}fs;2t%owPsu{T78 z3>ZUtHt{~dvB~C20XRP{)GGFt_Orsqj*_EhoCv4Q{*kK?-2~m?7yZ5HdT~aDq`w6T zdBmd6pD-yf9N@;}JYE+uB+`X~*ty+ad(3`(5W-&V2{M#F4ggUP+KuK)PzMw0C(^J8 zmCaZCB7i?krw#uy?=v+gRGUlPt?xwg9G4a1CL29>vIQ^z34NYj$Nudr_?glxKj)BWnAndZ#(P!e!Vuyi+ zUMIjU&726nP~4Bu-n&44h&Gmq=_BTzN~8+rDFVacqJD2lbLIF=M=MU4aHuTpbtG;| zb37{1J%Xmb(_TxA#fKkV)#s$8D8FfkSmLU{7{_6O9{kS?fT>JK1Kbsk3h|C|VJfoI zD$*{Pd-E;3O}+Y4&TGq7eVS{y$orlfm+yLaF+|k_^{&4e9<{5V*9uQ`s>!ULThT3aA5cp@4v2$e}RWSNon-9K|Z8R$O z;Vy8n8|bgW4f&~k9avl?V3p0mfw=^z(YaVja1T_$LL-ia>98r(c8}nghXHd{FfkcV4!K?gW+MNY;e0>U#xKYOvx)Bk6=pq&|9w@;ElVL@9=_kgZ(FsCT+#b z>|rWW;I!S!&80#D5H9;C3VX9Hu(Fw>w?9%GtCsn=h=vKdihaTaLtjZ4r9Zhh{H+4Z z4|aqms}##POchToZj=V)f@M*=&_GcUNEz81-nT4d0;1o}Nh>H;`b?TUN76o9Y$six zRN3Cx*Be>i$yocS)z>G@g_FQeC9T$N7;P1&(?B{wcRWOV2BP+NQ7jhIMR7Sag~=-} zS-UhnfVjSiafD&vF%?{Hgi3~epK|A=RY%s2?J`T0ZDn(~w>}_gLeVklQ2-ecr3V#( zHpTL3FX2#=J9F53NLKXNY0LTXVa#5!?D-dq*;7Ayf1-B`4vvs3v|B;v{)Yv4csj!O z#mA(F32aFJ5h;YY41A1@f*;5VU7Dqn$pEH|)!Wa*k%B=V>?akMs;~@X@A)x6`ZQ_0m_c$( zmq~%vxCZyx&ibPQH}~C@CD|EaOj73C{XzL33Lrm zVa3HJ6o3ro8ckHB$CiOx%4>pV!nv;6&hoyS5E^|$+tNmg4=KSN;X$^@VxV%*#t8y3`C*^v-$J65WUhglz z&`{prUO`0(hHJC9?*9gbNq*pej}OI$j9g%EA3=5gU;x-K-pO;$oY4%)8t}9qhOI29 zwGu*D16b^Xm-q{Xi)MgOu>7_Y?>di8Pala#Ak0ZzO;)D)05rm=~12hEG#daXJK-I>yX9MG}P>>yO zwp37jL#h>dK}{xjRn7F@*`g*oe_znNP{|Q64mcoEki}cG(*8IfY`3CEy&?_!=2boZD`eo ztpb;K#pts=lXERG3h~#_!A{UQWae7pl4>kZX1%_4FT-Lgeew%@)3?S&NHji4492RQ zjiwtqax6wd<}f76n1l-nqi>xr@zSS?3=2YPk0ZkFm*(|rvTh^8vJyTRmDTGH?~gVZ z#TtmEpb;MZ`!^XfW&X^Zz)n}djIvhpoIl*OsVquV5suY8PhR9sS#(I=>?^B 
zZ^Tyq6BBO&R$eBqtvWA3?FC7V?aWV%ZN3C~(rWn+q}#&CJZX#>HP%Hg${XO5JSmh2 z7f<))FXpYNgwBuhVldFARufr+1EpH+RYM&8N8csYD9!&kObX=3I@u-8UmNl} z0-hRz+nY(~C^(-X1WiGhM+UKD5q_-W;qXOEElx-bjbx`Jl+G)*)`r@#Y8fN%)FP1J z@b&S0KLw<|@xVy3N$1LI{FkI?h@cl#Wg>-sP@t7Wg_B1a+=FJBayS{dWw4=5omX!b z^Nu3PJ*_Y?2A3`;Ju3eplc+Hr>>-t|S?kU^rT4-B!=t(zZgc344N!$0zmSeKN~#6v z_->x=7gBpL9G{7!K)G9E)eFD+q~HO4;|;q|XRZ7%q4#Bn3dzhoR3U}W_lA0?!b>*Q z>AujLcP*9FRxOA#CJ95mx(ly%(hj?8&m}S{)1QL3RNwi)I5~(H_gKflIsh03E^aa) z_nrt~3^!Uzl!n;lXpB8TWQ?F7imJMG|4vxLUnzgMt0lx2nzI=$Ya)J`zj=i*s*Q+Y zJ3NF$_BDE)*&q2rC_eQ>%^98}=I@LD_x(X_2f3S7P!mg58uTVv$0)`iN5_0gl6g8Q z$QJ{({U5ja4Z!~Af^Vbu{84F1UQ9qFH0juW*HfgrX61t3=BSP6bj{vKrtI5^OU8qJ z#dexkT(bZ9&#{QGwd7eAGuYh*_UGGA0r5)fb1^~pSu0norvNpZ{eH% zmk3c5I%{zQ1|T^`$@mO2_`9L*iZ#!9R54L4_99d{Cz2snAl`wq|8^iNuzcCtbh5p+#Yu?NTTCpHRFO_kEsq2q zRJ=X|VN5Jd8gBh?Oe_%>vogV9rRu|*uLn`E8cLVpLS8|ZdbIIzs`7FPAL}-s5R4nPgi4H3;VYBHex{k0L8VdJ zZ@OTWOWjd7LM~zb>gWaM3;&u^ zd=U1s*(^((qPXSiFl&lkT#;2Z{aaCgTRe>87Yi0ljUg*wYEMpzO-aeuZ&tsQlTEop z_nM}2zajIwI_USAaeJc&eA@T&dK=O5X|M%nB}Gm=k8!Dyu?r^)-Yxx#wE1RO=VDKB z>RzSn)%9A4(JurG==5Z2<=ZQwHGhp>ZB^LJNm0T1+K`FxjO4H}H<`toaNeFHJ$p(* zrFXtY@+jVxne=8hWmjEx%Aw639k95*R!V{@Jog)x2KgGyVVpZ_ZXzld8_;KRk6Rk3 zSFqwfiNO-Hwb<_)OP}j)q`saS)+h~$-v<6>C8d=@Et|zxlcRur3Zg4;2BaOvZ=-m}65Q7?;&uvp`_=3t@!+6nV-hZ1r`3DwUt7tc$xPuo z1lFm0Xpj&j)?ZEzHmUczl&ohiq4I^G?*!oJNd<*}uy`|17^?`@@&8x~y$H7OR zP$~jc9D2sq{yoxfmQ6`&ETy_|M}vW;p4!cS6?kuCwX%`tdT3bjQqeHMziO~NR7C)M zO%CaQDHScHGM%SQ1d z3i*O!P138#3|^0m`p^K=^#_XLd&w~<|2eTcMEz9=rA0rEAg3U@+^tzCFl(J=XQ1+N z-)lTMXl$6g+v;jqSAv{5=k{Qk*|lX`Kk27y($KiguG$O@j9LEyvf~jwi@j8hME6pg z#%^^WjGHgp@3HV>4v8zLR4&V#+Y~kYx9giWAm)%H`=n;iHg^0!U8AlrT^e0{wCleI z>4TlS|LJ#-Wp?bgh|Q)Pc8#hv+?;l4b5LNlbMGZP+$?5=pb( zn_D_;XF9w}Dt{$63A@c)*c;mmrTAj^4lLRBz_r|KnoG|?<;@WApn(*OATNLVZLheE z{Sn|wz8?i5X-?j>vgBR4t8k1*otXVE>9Oa_+<%a}fx1G1Y?TALrh(u;@q%`4I%*1S zhjLTtH^*qHdzQF7BFNQinvYxUL2P3QZ`6(Kl2;P)L48~dS%`9~YlT3jL8*BACPSn*h61J=y=nA%Q1TEx$$5;J}>}D&iRP8)>bku!)B2wu_ 
z-sPGt5tKkD|6<#`iF*`XItpyoCnGMDioMt?rTTRyY+o9j5INpCrEQij+4sRprjoq_ zbs6|wreRDgN-e5n*|y&!=aBtaS0cKAh&dxpHbMc^hQ;U*Z&yR(YnwNShP!RmrG9@G z1}^2mKuZPseZ&TMJIgsz2gh1ct2bE_WB^XudmJ1M}b@SzE;;@GW&Ngv}s~@2y zqQ8`q)JEMpowh<|1}I_w`>vDMoKmg4FxYX*E@^BQvStngloN-}5n1JYBM7&f2P*@Z zQ7i2U{^j9AAgg7LhzoIIOi#@X3Fc9mV4Yp;`1hk?>wQPJZc)MiYX!7bP$G0$N;5?^ z{oBD?tW1+RApFlAvE|%!Fiza`7InlqfVpGndEiNPpX{E(b=BX~aQ9gUdY!bBOLlvO z0d7u>8=H92x|1}$rbT%NO4Co@?NeFmE8cRSne2m42b;wa1#|g+pQk$<74xzrzSPyGz!`j8 z*$>yMU@becu~@-GF>JwsiaO)XN`QPNOjBY3 zZT@gQG6|!r^4+knLXN0z?WZ(Pb+_-uf*=kKjDad}M%^aQnSA+m{F)mezhal(-SZb2 zM&pSRvP}h=ZaWPSS(pxQknzuz)@Z*5)Wc=fBeV)!@01Z^}J{hWEE3ITr&Oxcjw4~*O!ZJ8k%K5M| z46n#lCA24WuQBnMXHOA4D)QuLA`Cyw(tU88;vbZiKn}&uRD@koG2uqu5Gt0PN@xlK zaZOMPkz$|WMJ^ElPpW}`PO!E|42U3z5|!{W&kj%4T!xJ((G0ZkC6xEPC)%&4QD|DM zU&;ec!^2J)CSnO~*@yZhy%W;R!>%?(x6GYozN(GkYJ>M{0yu(s%h(qcdm=!wBX_LE z5HA}%Tqx|Lg<*-cP+w;2s@wSG8Yxqez8VEx5qotc@T}dO-+%hg1`7q6(Y?m(VC&uOf0%02eY z!t=gF_BSaWW<+<^1=1hRFY8Ndg|+zQql+sIKbjsC`R*Ev>b7 z)|gS(737V~x_GS?wq>E<07UioN9sO*O&Ff~yTlx1`)a^O5)RjUe}zp%3n*JuwGM$L zW8p}J1E@1g69gH)tlG5>c$VL+w1oZR2!Kn`x)8+w`cUI6a79~#EoECj4otdi@EgBk zTs${E4d$HhVK|1MmN5%?jca+Iz5lV|&(QRs?GJWAT{cWgGecG+tg#8XC;2`JKDNcO zC0j!%km+SrWRMDe9)g}z#S(|tX%vlrDczp9i(Q?Y(REtur~5ElSZV3W%e37g?>4qQ zbv)j0F3n=Kg8aBiV>OL8uzW#g$e_eGd;q))f}_=FBOzx>X1HQ4FpKF<^2Bd8Bdqtj zlV%nEpyK;9h{yx}gsDu?Wh--g7jwsRP;)y!evirSz7f6!lI1r9D%yW`)3wx*nPZ}4FgQcBp(AP*NMknvL8`!`|azxSy^BIp?)uE@k7f6RDZ z<&c^F5rNApGHdl|&RCC*!6a-{o`@?xg5x?m!WRR%eOC!^+tCM+dgAz(24)@z9lUNp z4?Si+drfrNZDpe#qU|Y@KRh?bS!@s!6e8wOJHpI@d!57B`>nHvo+!=B@Fk9^8Ce;3ovL+|ybqHcsSPesB&-B6k6KoGZ; ze$U?#`5EWFSTi%8^zTg=mC(lc6p>Y2yeJkRM~??L_kf&wFN`X6HX0{-7XdUKf*sBC zG57=PHu+o34!aN(&1Rh&GG)>FG&#5e<5u5N>M=_&qA~$Kwa@O|kv6WkT3(Gs_ z8l>f_Wpa8qT+*Y!Xw86L?dw21qtL!a+Wq#h5AzdLtkCaVlEn4SH9VzbJ)pI*kKaQ( zrC0>jADD%M&U=p;8oZZUjXclPO>taY7S!ZeLb!S|&B!_A0o5(I=Y!s7Y@dVf{FwuE z)u@R2JImi8{z3Z>*f&e(GjXn4HUt|5adui5U9C05Z3I0MoL8tE>tE)nRyLX#L9(W? 
zXXn1Q;l$~nAw*Q`cQ9q5YPqPCJa&7ugJD3q`sJthNDwXuXUV6sa1{=#pzFvzJp-j9 zXlDi3aJ)7Rc+67nN-uow4)MUC`iV!@aIX4G}vFWVj0GBJ6SzX0FxN z7*@2D6gVFr(-KAAehfP}_7O7lEN^g(`&x)#_Ng99*B=@tp6N2*%xBC>o0@7H?pNU5 zqlF{@&`JhLN(7?+K|}1aV=LqyHzX{bczawwkT+k_Q#uW|v(XQ^f2>3X{`R}>3Ya$X zIeGkSaT3U?&$o})CV+e$>eYk1o0e_#A~ty+Ah>o5R?jt7j0V~=aUd-ZT6&~ENdAH| z*vVV54BDh&vDNsAac`dZQ|k^E4V9dEl3_3GrVk}F7Q(^xt6l$^E=WWyHVR2w`(Ww7 zlkz-jtv1p-t4tEsxG%ZAr`*{B_wmuD_`mepINJ54xGeRdxi=IRUL6>k=iNaFvcfhF zIPRCco@u1Lkr76KT1hUJ=?TEPY{H@Vhsmyw$7-lv_@%pQCMVCf%;#8e?8KvqTr7ZD z91uFs6;Nh4Ukwr3+5$&L?>bs6B0nG2ik3+Qk{xZ(aBnp|p{L~w;`4C#S0ddp;PvgV ztFHUB;181YX~*$v&-tI9L@$*%bl9%^a?^05_X={7p1Az~x*QTd&t8hw)`G}&N!@1I zE`#p_a~~~X8IjyjhE1ZtdH=BaT935%0o%mL$XCD)o^Rzs6Du($`{$XDL4XKQ&@nBg ztYR3vxHos+LX#xH>WBew@#Leaz8`l8`^Mj2tn|jry}y=*y4?rckm2@sqvBgYwj;+l zliQscOq@N6%SSGufRNC4jmW?;?Tn5382T0JkWlXfqak5H2Heqk1O^p$CBj53;PaJE z8PkLmx}qBH?bf~Fi3~U}WNM7z9$RP=aTgy8+>P0Cb`f8f2B8nuN;l1Y z-j^StKT@QQgNePR66>-i)fMsHP{|!?tHfVhA{g$m&?Z*vhE#+-r&lsPlDc8+?U5cHIvBeD^T2Q$w*I0P8>kD69&&1_le zR`vwXunUwQX1GQiBwRT)M^fW9QV9<&e2&veU6VvXR!i8ll04kjdl^mEN*kpnUSwO8 zVJA2f`Ul{$0Ue-&8Hf^;exbJau=8CDBDu5HH?g=%?EkM4(xasC{HnX@x=B@(%z{R6N^C3>)OJ*BpL%x|reNG~~K{$J; zY^ueaW(O*jvs~>)wCgu^9QYoweZ(63-enVLG(>fqY)TZceoii6x-q1sqvvJn*_YKE zyO-&u;#3Mq!};Gi3*k@^{UKYnuP>T}YP$~xL4Ft}hMesF8{fKpvB1teW+py%H;w2j zA_Tv$6Yrvh<3`i7ex0O-|G{WKJ362I+1hLA2fL@RXkDxo%yJvU)y2}FV)=}qp9JgM z8!~qe65QSnCbf;$gb&%bTr#4$z}(jcUx#O{!0*xr+MQiakr3ELWw_C#uI)~f}V2v`@iX?Y9s-XBIF#OGw!@@qc57eTLM^=O=|-i)5p^3yy0%)uV9(%z$zXu2ROKRaRKJ#RHcV^B#7u@b zl9ohZ`Q|KX<{#N6>cZOP!`cZ~W1z0MoIQAzujj#$!x zi3$j~Sui(ot#=lpJ&6$16HzwzW!yI1R#;m2&bY-vjNl&QPuz#!2#SPA&W4qxEG+>4}v;@03KYD%7r%T7$~CgnY9f*7gz1HOu< zibbS-mu~Y3c2rD!+&)|OdwOTp2*Xk{!F`eUua%Us<#<$MSW65$hf$Lhc*$ga|Jsn1Cw>>tnnQ)(E8d54d%vZLmP&_emqh`*?0*LAn#dSA zh6?+FTlb*O)!P`VBqd=vk3D=i7N&5MdrTN#bYdFn>*A|@g}Cm_$#baSrBYY17v(U3 zL4lnDx+4YwW60-$*;`=b8L`as{8WQ6oo7+UXfwuLWo1nJoa|rK)fp?Q4BjY6OJJ-dIb|I%vHyNdbvW$b#>G90YN>GfnO#sHj7Cdgal0EN@Dj$95W;c141M2P9NP9} 
zpOf2DjSf?wtugG_Oc1{Xmg|W`i-7{tmGZw#MdEhQPz5N3_S%*t8EFyxPYIkO(Y$T` z{_f1uNufM_mdMS7W7kynVfsx&gU5qO>o~;wzQHgi%9-QLBb^Q8RZ3Z;IXnC^FV^J7 zOw~OLZ=J7Z+){$9i<4On1MuZE*|aNr1a1A}E2OW0Z}kqZ$q)Q?}s z9m}0Yp}{wkJN#)YcD{->7Y}xlA|!>vOAF_@)c_#7H1zTf(qKhff_Lb+3uEK#IFrSy zadAVaA0J01bVUo|TtB9K5SUCX$0aw`>5Fc18n-TI{2oxBCo8j3_7?;nw(#GQhMIi2 zCql4LLO9RNK=o(VLl?y&am`JJ_NY)QmDK3MH7|dE$@8tMDbW!KJJ&zz+HcY7zaaLMO~feHLNK!3 znmm^4X^CS26Zw1>yzfc&gl=^0*gl=ui0_{NZta&L1c zeyqv}B`t87x@10a-+0;^f6sw?%(d;5{zxSB$>!^hqx=3%aYX%Bt|>tS8UoDW#RP6OezI2%H#iM-Q`xpTke`8c1l zY(|jKz_GFu6~C2oF* zg7dxu20-aPGoE`+A0D|4FQKcC^i}fyg@}C&jvIg)%Gr4Z!G#y2RDctX`v9W#SrwY1 zU?0Hymklnf8^aH;3lB26IvFnmbDpN4qh)vRdX{ZXtS&Hc#_P5Ps5nHtK5i^bqNDQg zP^?Yiw)>GsLnoMFrP;d0{7;OWL*3vD>1v07`EWj~zP-hGnMO^q-N1?Mh6bU1Ae1+ISF7YLTcjM7rCJz(4qzPgs-=UE`@8E7Y8Ls(wpq)O;b zqfd{{8&l;cvtn{ae3%B2)ik`orU+Q(JW2GhXj^#(IX> zKx$O#8seK7Zg7vT98ced zO9fFIKdJ2iVLhRoyPlZB>%M-sS}sEm@CX(79p+JmW9!9a_xhFWWH-t&O-K8hULx|* zL)bSvJpNH(;&wNtgUr06luOg~Y;9MI+9;C!_WxX-Nnbhssw*`l3K%BI6i*nV`o;TBH`0E!0OmyuZH_c^OxL=?z5U*~PiW-x{v)C|A1y4^3 z0k!#HOX;EzYlCye{AUSzsQ1@ODn9{>4WFJE$&qez{s#O|p8d6MSZ(a$&0l2$?HU*= zufJYrVs=zH(?2bDn!c6_)-mB2GS(uY9bFC4Wb}Vctqq1DTK(Vz z6C#SkmY;AEH9L!lC-c=_vo>E;;$nl{<zxKZ$t6<1!OlXzEJpqNY2X%v z%_8!u-DNuoQHpD6Xg#(0!3)?($v+!FM963*D65G;HR1`Tv9`T8a|~O3Gc)iSHgQg!yn`+N~8nr?@ND{93) zY*Oy%3ZjY>S2g|9YvyE)+@=*)`0j#@zKFB2&+wxp(wgHSgJ{5g0C#+n8j(<)-kOKA|bj zgSXxDq6lH}sY>q0KDygP+N&21c2?_sU0+lYTQ?Pyej^spazLj4kBo-@FO62#w)q!L zrx7r*wee@s?bni<{}J*1REgyG3h0@5Qq%FN{EmUBMHZo_o_Q#BIF z<8;@j>Fei^*Phiv#pmr9JDqqnp5J~%Sy zomVy~@yG{6P>n2ikhHLj#uyZ9bf%wnlo1vA@2DOKg=KZEsxx?Td>aiZZQf#)E{7j% z&&Sl9y-*Nfn`np)h4Vn*iYDDzc0ZY*9OUg+CNiI`a~(YIm$LEFG3xeZ_tNp{Ief|! 
z^vy~=N^mEQ%7XWkNt{Ab=&M&Yxl@$5N6mgR;w{+3TeDz3=(C6JCxg+lBHQQtKcsr$rR_=eN8Pq1TvthNAv zmZcH3_ynD`Q+E`V5=2&e6KSI8%_k)F**13{0JAz3&52LJ3p99Ih(eMQLk*>7y88g2 zU^@0g0+p7T&ZkRjP@|^QSicjmXNJZABmD(=hncfyQGV@B7j^rg0H8G^7zB7XSPO69 z6Lf|K2`%&|5m1`JQqwR{<}hxr8=&~d7CY*-s>tC0yW!=l08Hv9)0QUc&S;8^RNuGJ zMFH#ge2j}{6u7?08Jg%WzQ~h-p*NJtwrpzNUgoku{qAhf{2A?xVi->f@g~06?;v1? z1ZaxKI65a(wRl2mnqHTNpUfw2*#qGr&CI2Xc9cN+lkvh~05M}qo2TaeRJ{q+#qRmp zNw7~oPB{6Q`XLS9UPe#xunBu++Gs~54XJ+_-Uov*r{UYD8z_ACPj?Cv-73Ktb|rkm zZc!&(&33CbrE9{@Px1hdq<}xM+7Y1k4mbj&D>~iuI|y(-Q_Z<&3NcrMVI&3wU}EP} zmZYP7R@`-|6d4R13Qa87oqA!(4;-55&uEvN2HKPn?t}?$ry2R~KO5>#uTIYb(_Ab+ z=^*Rg-TV(2D0EH$^EZf;cAdCr}_6 ziEi89@pf2%U@FH)gjmU8C5V3QX%x6xzpP9Rc3#KK*Sm4U`ZV$S9|R~Mg$4)+DG5|=VPZJG?g4CIKR0Ul;RV0w~m@&vOx zSXnGcdmezHMpIhieliC;?_&yTUv^+JX-}~Z-Y%uFaZznrAlTu6l@;X?{Ak*E)-*W; zu-&R5K~$MO2xc1RP1FYH;t=_Q5pXc2L@(BV<~&eS&rHkCLu6M$kd7 zW>Lz@_f2E9jtF<5@NHaJBow7R88F)w!Dt0)lM(uR!j$bNqdD|hP;G>PloriLRBCOn z1v0avg~M|R?rlZdH-g4k0TB+iwDoKnEy!6NUeO{Wu54=2X+S{~5xW?E{+=++InBKC zXIl_hS&3~mX&o2LbiI`htuT^Qd0M{(FBFmop%d8Dl^-QVu+f64YB2g?Ds}C84AE&J z@y4sQQdLaaHtI~Dg@;1OrXmhjD5@G7ExZw}K$-7LNx}{O^=P0c;Tf2p~gGT7&P}A_J^xJjt zA%aw_Pl3>hg0v9Qa`$CFIG-^}U?ddSwhLlFpwM?~;PHJMbxyiP^7=M#k86XUofKrk zbS6g$T5)7-m3^M?#LMztJ&o5!1}}?lmnFAt(d~1|?bo7P&pd|tHPi_bre+v5=tC)m z5)1bW@CJqEe9XV~%d~YW)1X1A1&T*>;_*=z*Kgg(=o+Lm%Hu!UEmZy2GrCcyX^j{> zVmMv~KC%pYuX|+T0k5DDAGzG-o0cpxUiCpAeHTWV(MQhNc-j6?;C)Z3kekb+Tu9#-1$(WBSF$4cd9z6RY-rtU-^jXI59)szZ1XN@~#u1-Z9 zK3sdPYPbIEB+Cu#C-ae(6?@hr4uyOqKXAjD(~$ud4ARh_(^N<04VEmMsud%%CcZ_d zX((#@Xv}Fo6$!{kY=ATQwyZY&bCgyZMPk%$`PiG&6u9WbSw31b=%@iJq7S$r-jLHe zr9Zn@*sb)llVZh`Wf;U69X-p#cO&SzA=%>WQQ=*)^0nx;ExBD5-G2WAF~;3*icKWp zWbBsU*Le+2ui?w%RvrH=x!Ip4Y4|!w@VTMLU@9~bENf55N&+c-m*H+ za4)3680r2ht#?g$pc4%nt8#_EYR=w)n87LYHBN9d^HNmI43E{6=~aK)I@4*>lt}p1 zG)7)zQ9-uFo9g-;&@;i01`<>DhSg^6h8UP)Oe^$tPw*pxvu=Z|%*%eBdN*A+?W{$E z&f0@H{!JO~1M~VZySCCeA&5qH)?Q1@#>`L*$hE2=t+3KM!IoV->#hiZ?X_tRL$`22 zymE{Uy8WpP#Q4&B1B4~^m-^3QgsVwq(GVzLYrA!Bky}&mp0| 
zqT%C3&Ajdh{e}r3*F1`M9tb`C1j70J1N+%YKq#EH07g?yzwPS!^-FIgR40D@6vEf} z2ln$#^)Nv8{k+=URMADn!!_$JEXcyx zthM{e)JOHqG(BUlrtHC4+=WeM9X#W}G)2DUTG&y~OtUZkLCt82fQ;3r(G&X)vT2QU z&&RK`_NtKp>B55^6C}D=;QMNu*Q)AwVWlVLIi-o65Ad$5b2CYCBZ73Lg@TNA9<4N? zy9u&sg9we2kD~fl@BE%HQ6<<3jM5+oC6+123iT)YYMhK<86uSp_^(e{y>YU#bhG>PTd6@Es1mKdX8tQkGX`Ns$ zMg_|x;S`X6%XMf;(W%C5UHP=$>6SwPASos(vhpk5rIFULo8_al^3^p#@65|FDyY?qCB+`zlH5o9@}=Yfe=nlL$l9oMRvu)6aiYUHNLA-{s4B zb*oF2?7%vB=VNwMrRlLl-*g+jP>P||155+C@5F1&AQCh6RF!56ofD*7NB~!M zHECNXj7>X$MkEOhG8V|ViSt166sN8U!DGh&P9uO%H6g*lnKS5-04-@wtEC4eeU^a7nsF>-Tp-V73R@^cx9h346Gf@+{MBA zNTG*Ep6eNYd>3$%8VUH&5aYNdwPufr; zfn=$10<-P}W}JYnrJhKe)gzlG>9o$zPRd#k%Ek`)Gyy8LwZhQD0MWw8VDL7RrlT6G zn~nnpQ;uN`wIh)h`ZF*8U7n%SH!D;n3MO|(LN$_UvcCdonib_k>nDwF0IzuIO7_G5{c-o=4LR~`w2HeP<$jh42p|_@ul%+D`-8!cOry;cw4zDs) zU+!ru;H{mefm8%Qeq;zqt1ledX4*=FTjan&)J)o2MOx>C$*gLC>Z9+-=*?+nEC2{H zYsrzcLlaIL+G67*D3SKodegi|w*ysiA&Qz$yB9kvjA9saq=4kda2hC0Kuw{wrn!7D z@l%lu(hl6EiL}-QJ|--P1ZPjA{T}yp?5>#@uZ<&5BC0J>G}ToF;|>d{$>j#vMA%zVwF%Zwt97?a*S= zq_$uK`uFTY%zP;{>A=bTr`O z*4Ty%z9&LJ;+mGR!reCyHzv?Dbb=~R$AYvwl-FA)l%O3oY-5SJ!wPJjAl`h{k;u-+ zf`)rs$4!gF0sii+tgJ{ItJ3=Bfh9vR@d8t51;Jihak&;H1szwJ7X7`pBCU&{gM}GJ zS(>>m@U_i#@UEQb+i;mZ<)+a>oi(tHA~1z|cL%*zm0=3DQNN!|ZB`6vpD;&&0fB{h zY+JQ0hX(34-8wGBMBxyoqNU}!YaDN6HX8ZBIOf3(3F z>~^w3w5HR#)hrM1j~Lc6oF&j`zb8zEy?UqF^O5$Cw5HKk9tw*Zrb(P-WJ)juB(Ts;)*C*zf--F{CP>rijM0tS}| zz;e@dKRYQgUytYWv-`nbr$KPD^gvn%g4t zy5!a~55FCZqoLx=P6OF>@GNFJ_Ej}Op=%ymD>f$`t{{n}(_nE#*uSO;>?Q14CTQKz zo`hup`}QjH5Q#}9_89_##feqjbEKlxzp?5K@%zT zOvq@%&}v}RVdQKA)I5E#AgPf;`XpW;psfY-#w$2M@ORF2@IV^;C;$*c(o-ZVug(&% zH2P)*OM_o4SqZY7SqCpt%n8VB{;zHd%2+xh2b9bNJ_ybK=%&``t})3pXgx5|utvCg zB79`?)GoeBGd7r(vza{~S&$qgC|(a{-4mS2ztRUb!8DV$7iwYVe{9^oX}aUA4z@*U zqr}LxxkRkwv+t?bstQ6CX;sFlMwr-L=-0hwD+aW4uz@JQ?_gV*2WSIekenU=Mf66W4*bEB*@O?OrY;AH*Cl}84oTv&H#Qqw*i5k3|HlYuPMnxGwSEU*VV z5f~1;mI)~p@J8?%d4QT@+njKiF@qQz- z6k07h{43eLg4Cch{SrPhX<4E0HqSP~JSKoi$~0VXp`~Vqy-QSOLBq8)PnJ=r;|4;~-+>s3=wn6?7ERb7Z#dl1t^e=^=IsGtRDdKds^cBX-Y 
ziRqnbrZ$pRd#lriPV)_c4w=yJnXt$X9Pv=1)U~R9!XE4iW*1O)QP?VB!G1ELAYc89 zS$j*OmF~-4S8=mrtfZvjNa{sdpe&aQ)p zE8k6I*X-!y>D;o!rHD-I#BqR=6=|C&EU*(v?c+)mXrwHFilN@GN=2OeHChT{^nX2Q9~p7S{nwHT5rSSW7=2wap(P3Wl$0vZQMxJRJd zhRn!X@V2X=5DkI-tFG-`gR9MC+SzefQA^v98oQn2s{T_+l@Si4E!fR|ds1wy&u0WH_i?KR3i?g827sG$VkJEJ*c3x0>2f6Fjak^oW6gDy0VvMs{iv>74x@ zG;}rhGx6IessYTjNc26Ah?X?%xD8}2<=3Ah=$ zlXjC9^()y}{n7LGdORROPXu)|x2hE|h@ktmWv?!R4tw!0;s!IJ6lFsin`f_8)iR+7 z>{Wv``v%xi3PiygnwE3h_hB6*?dIm~S)!fc?`)L=W8kaWErXS6NO93;!7u)YQVY*( zqLrHX5dy=2M5qQeUg&CY$i}Kn#YDXII^g++Vql=wNiS8E!vUc!z#WGHpp2lYr8!3l z0|_c23w%GB5ELn>03SP)5z>Q|W&#&N8@RRUd z9tJo;lqG0h?F0{W3lc5uTGc9#ak&A2QWac|6;Xz&l8^_%D`On&jdI9|dUGZb-qhfL zJvtZd;wu|42s#g-b_8w6=tCeDKomF#(J*Z@l`i+$Nrz+w%m+yjXOhA()3DO=NHbPe z6$Nbo2Vw_yi&DvlRqq3l9=8sj#QQXIGokV<c%;z`{m?vVZ-_^X1oXu}; z?!%rjfP)4Fx@Oae+m@NJqX$i4aN3B>{DOaOuqn<+a?K2IA0iS^$$;uQwyQLL{i*Id zaSbaOScnq2FGf9rsGtVJp{G?2HLimKo6o@TAuZd`;Rpt-+3pCLyIzo~df%+v{j73; z!ok(0!0kb$B|kVxo0{!qWCL;gSN_q;$$US0=xAvNGGFhfed5%#p|Kq& z(!?kLXo~RfP4}%W?O^7A7C^>5zo5q>fouaFRyeDq#w?W#n&O+SX>9HT*&0l;A^XV; zmmsxcIAXCQzL6tL<7TK51Cch*@AxhR7de;T%dS;*3*B)93W3ET3~PrYnXB3!DS2<*o{QSI<4=*nYq}2swEM%;n-eHPk-hnj?^kR5TpqGJOSc4Y7 z+sPnNfrhDza_i^(;RXpCCQ%1QW(c?vA=T9Od_>*mE?K?F2C5vI*~~`Iwkwkv4wSeD zog;ePd!t7Ql9eFU(mrW}^^xKd6@r(Qz9_eR)@K=36z0^V9n4M@&_MEI&n?Yu6Ma!S zjGNExKy8`b)(ntnY{qB@1#Mh|A2!6P{+70j@1Z37ASeof7>TlZpZ4x{Fl0Eksjtgh z;C91-jOl5RSRV-_2JZP7g~OGt#t5)(uLGU}xA`x@E zXf9vLfK`Ol^7bMCM!W&b!LPXKYPINXO?{1J9 zDX6J@54Jx^!-S$2{ALPXzS2)xnWl~8DgzVnK}wqT=+qN_HK* zzJe)pR_(@SY^4j~Q4Vpdk~a89wCie+SoD6Np#p-JxFZqmriQvl$_(R(WX$(%bPc3s zZ#WY-GHd+%e+eW`3Gm#h%c2cTnzev$f%i1!+UP z%le=oA!D~_G zz;we*9pyV#)(Q@&_wMG-y#;HFawwpRW_kxOyKS@~XaTXvm{3`AtOV@g8c9t+OzO9^ z-S>hS1t&7}WN1_hGqi19+E}g ztyko@>3s9^A3-ZaK|=%l4Kt+X?T78{#`lu}JO&>TWXBVxaq>&0kW=|&XPK_%eqVH3 z_U}!a10@|pm84nz-|@BAN9}%7EG!vAYS+w;Q;a*gnr9No$@uqRZv0zNSEp0$vr71_wK* z948tI$eCC3R0oEC?T=Q(NEKqd)SUQuK=MkobRB$92$7_H4jP(U%*&f{{{2~gH4{`7o9g(C=ow6i)Hpy4 z2eS1L0xo1y^6?4^uiXqx8=Cp?z0H32yq_>I7fcCpJ0|d+Y(0XWsF%HaCOr*gHk{(2 
zBD0kmX$$0LI7Y%I+(TwJyKxRkCxufE$08&tM^*(K)*Vc9)@XKj*+0ojNO>t2rItt*N#*&Hzw}VWh z16cJLj@qdnj8E|yrp;^72<|UjCmJ({OE9gXJLGO_dp_EsfEg2*3U?QA8Fu=|cmSm9 z;BqIOhD!8p+HO`MeV~+&mh_APZ@|*L&gp*EkKC0 zJ5l{2jT%%qhmPikd&a0q{pYw=H3dns(G4oHALvrk@+AD#7iNa7>=Uc?OaFAaIxS^H zYM$-j8$D)cfifjJ5A>4@=kpK9cM#BJemm6zHx+EdypEHOWc4fV`IspnIzP9^*N?>E zPZ@i(!v!NSHSP?k{wv4=FPO7;E&wd z9tcrD(kDDgTa}hDR&A1OWpAbbxfu>Y+}+{QbPPV@6Tm8?;LD!L6u+?hwKQ$l$I^CY z{fwFV!IwLgxoaUKMXc<&Fs%`#e=4DsUD-rub=Vn~ri}#_eRk4UCiKg~0@rIJJ9a=m z{Kw;=9m3YY5<9_{cYKkvd3QegpXLT^`TL2)+td|ytV*+y$>JB-^RW<`+P>^3A{$O+ z1gWaDComJhYMiNu^)C%Q&&^-`yLW1>xU7Li4G)n3RlviaMF>W1T05)YRQ5*goVJa$ zDTM5vkF=V~cKgY+!L(Uah`g*$8%xvJo3M_EPn#8@>80iLf~rh7RzST4|D_p^ET|_I zBecFWJENnoVbcA(v{7JTtsZh!d%kshKDM>*#GCtgQ(Q^gI#aVRyU*5%nRW25ozHzD zi)l`-b6c5VEPw@ORt>nK2Gu<@zBK+-VNwfjwpC`-rzK~25+8gTgZdswFWi2>tBZmq z3*nc!(VPw&FnsmE{bcxOYnb-Ll}*(!7Nmh?HDAC1Dtgbyo|i_J(2!i;eu^*_T&ucv z9eifNjV#sxbhk86Q#xQHcAzUO>?Azv)7*tlvZrRapNyHCKjqK5G_Bq|{7NWjT5`|F zwwLDKYgKMFq3=$!F6~iRxaZ@SExQz}H60LCvErv!UzipPrFP72auSo0-1D*LrTs$7 zBuzRi*8uG$Z4vm{`B0|yM!27hmyLWEr#iQ1v;@BByuIsz@A=sD(tvzq$}`34G)FL{ zHb`2Y;P?8H_`w^S8~B|EvYYtEf>4Z7_*!y%S#%2``D?ek`)3AI+^U{!CH?b^O!n2k zXo%TVV77WQzS*(l=4w(iKiTh?agvZShEAhBc6Ry9$$41D$$c6g2TQH(g7TONAV5IE zD6^x{f{R97Lsr#xv|Bit2_63iY#H?<%`GlPN-NXNz*{=NSRg(Y)Ejj zL7?IxmUEXaHCnfy&dGisfmkDUcS|fJ*K>&7~tQ(k$hB&(;&(v4bVp$~@ ztc<5$NWBdMEeF-$)A?F6WWiWp`||mzz}{I=ijgRv2q|De7mJ~trpb60t$OmH(g$sc z$ECwocJeCJ(w{M7vke7iNVrr4HqlD%M23Zj;7B#V%B8mgov`OS%b+xqSN_>aEV6yV z`zc#F*~^>qX@l9!ap|Y~$s{K1ZNQ19K@YM0HtqCoI<{4$HF+4$sE|u(=UucTmIk!u zly$QrflF^qLq*{{0i^+H#6c6kssg)dTH-P%cd`l?sJS?(b*7=IoD(`pDy__JWtm+R zPOSYz$|Y&EJc9~uulus=T+waf{aNOA=K*C3mvrl;Z35cdQPgYFXlX+qu2dD-K|zfM z(%=5*QH5#9F_fVpy|qH>QCWbqY(E*&?TD32kKKI$j{uDj`eGN(j$Cr2+rjoFO5DE!hq z)7%gMR8Dqk-@(W?XjP#Rm!A5))%9{3)vw4SlA-{nVe?fL*g(@lkNKN<)0U{}QX1OZf>(<;u|M)#8e?SP4#4N9hEetmUl!v4}-_k0w^s^4K Any: + # we expect to get two valid on the dict x0, and x1 + url = body.get("url") + if not url: + raise ValueError("'url' entry not provided, expected 
http/s link to image") + + local_file = StorageManager.get_local_copy(remote_url=url) + image = Image.open(local_file) + image = ImageOps.grayscale(image).resize((28, 28)) + + return np.array(image).flatten() + + def postprocess(self, data: Any) -> dict: + # post process the data returned from the model inference engine + # data is the return value from model.predict we will put is inside a return value as Y + if not isinstance(data, np.ndarray): + # this should not happen + return dict(digit=-1) + + # data is returned as probability per class (10 class/digits) + return dict(digit=int(data.flatten().argmax())) diff --git a/examples/keras/readme.md b/examples/keras/readme.md new file mode 100644 index 0000000..185c1ca --- /dev/null +++ b/examples/keras/readme.md @@ -0,0 +1,46 @@ +# Train and Deploy Keras model with Nvidia Triton Engine + +## training mock model + +Run the mock python training code +```bash +python3 train_keras_mnist.py +``` + +The output will be a model created on the project "serving examples", by the name "train keras model" + +## setting up the serving service + +1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) +2. 
Create model endpoint: + `clearml-serving --id model add --engine triton --endpoint "test_model_keras" --preprocess "preprocess.py" --name "train keras model" --project "serving examples" --input-size 1 784 --input-name "dense_input" --input-type float32 --output-size -1 10 --output-name "activation_2" --output-type float32 +` +Or auto update +`clearml-serving --id model auto-update --engine triton --endpoint "test_model_auto" --preprocess "preprocess.py" --name "train keras model" --project "serving examples" --max-versions 2 + --input-size 1 784 --input-name "dense_input" --input-type float32 + --output-size -1 10 --output-name "activation_2" --output-type float32 +` +Or add Canary endpoint +`clearml-serving --id model canary --endpoint "test_model_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_auto` + +3. Run the Triton Engine `docker run -v ~/clearml.conf:/root/clearml.conf -p 8001:8001 -e CLEARML_SERVING_TASK_ID= clearml-serving-triton:latest` +4. Configure the Triton Engine IP on the Serving Service (if running on k8s, the gRPC ingest of the triton container) +`clearml-serving --id config --triton-grpc-server :8001` +5. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= clearml-serving:latest` +6. Test new endpoint: `curl -X POST "http://127.0.0.1:8080/serve/test_model_keras" -H "accept: application/json" -H "Content-Type: application/json" -d '{"url": "https://camo.githubusercontent.com/8385ca52c9cba1f6e629eb938ab725ec8c9449f12db81f9a34e18208cd328ce9/687474703a2f2f706574722d6d6172656b2e636f6d2f77702d636f6e74656e742f75706c6f6164732f323031372f30372f6465636f6d707265737365642e6a7067"}'` + +> **_Notice:_** You can also change the serving service while it is already running! +This includes adding/removing endpoints, adding canary model routing etc. 
+ + +### Running / debugging the serving service manually +Once you have setup the Serving Service Task + +```bash +$ pip3 install -r clearml_serving/serving/requirements.txt +$ CLEARML_SERVING_TASK_ID= PYTHONPATH=$(pwd) python3 -m gunicorn \ + --preload clearml_serving.serving.main:app \ + --workers 4 \ + --worker-class uvicorn.workers.UvicornWorker \ + --bind 0.0.0.0:8080 +``` diff --git a/examples/keras/requirements.txt b/examples/keras/requirements.txt index 68a3593..d2043d9 100644 --- a/examples/keras/requirements.txt +++ b/examples/keras/requirements.txt @@ -1,2 +1,3 @@ tensorflow>=2.0 clearml +Pillow \ No newline at end of file diff --git a/examples/keras/keras_mnist.py b/examples/keras/train_keras_mnist.py similarity index 75% rename from examples/keras/keras_mnist.py rename to examples/keras/train_keras_mnist.py index 90972ec..05ada4e 100644 --- a/examples/keras/keras_mnist.py +++ b/examples/keras/train_keras_mnist.py @@ -48,41 +48,6 @@ class TensorBoardImage(TensorBoard): self.writer.add_summary(summary, epoch) -def create_config_pbtxt(model, config_pbtxt_file): - platform = "tensorflow_savedmodel" - input_name = model.input_names[0] - output_name = model.output_names[0] - input_data_type = "TYPE_FP32" - output_data_type = "TYPE_FP32" - input_dims = str(model.input.shape.as_list()).replace("None", "-1") - output_dims = str(model.output.shape.as_list()).replace("None", "-1") - - config_pbtxt = """ - platform: "%s" - input [ - { - name: "%s" - data_type: %s - dims: %s - } - ] - output [ - { - name: "%s" - data_type: %s - dims: %s - } - ] - """ % ( - platform, - input_name, input_data_type, input_dims, - output_name, output_data_type, output_dims - ) - - with open(config_pbtxt_file, "w") as config_file: - config_file.write(config_pbtxt) - - def main(): parser = argparse.ArgumentParser(description='Keras MNIST Example - training CNN classification model') parser.add_argument('--batch-size', type=int, default=128, help='input batch size for training (default: 
128)') @@ -126,7 +91,7 @@ def main(): # Connecting ClearML with the current process, # from here on everything is logged automatically - task = Task.init(project_name='examples', task_name='Keras MNIST serve example', output_uri=True) + task = Task.init(project_name='serving examples', task_name='train keras model', output_uri=True) # Advanced: setting model class enumeration labels = dict(('digit_%d' % i, i) for i in range(10)) @@ -155,12 +120,6 @@ def main(): # store the model in a format that can be served model.save('serving_model', include_optimizer=False) - # create the config.pbtxt for triton to be able to serve the model - create_config_pbtxt(model=model, config_pbtxt_file='config.pbtxt') - # store the configuration on the creating Task, - # this will allow us to skip over manually setting the config.pbtxt for `clearml-serving` - task.connect_configuration(configuration=Path('config.pbtxt'), name='config.pbtxt') - print('Test score: {}'.format(score[0])) print('Test accuracy: {}'.format(score[1])) diff --git a/examples/lightgbm/preprocess.py b/examples/lightgbm/preprocess.py new file mode 100644 index 0000000..5d7ebe7 --- /dev/null +++ b/examples/lightgbm/preprocess.py @@ -0,0 +1,23 @@ +from typing import Any + +import numpy as np + + +# Notice Preprocess class Must be named "Preprocess" +class Preprocess(object): + def __init__(self): + # set internal state, this will be called only once. (i.e. 
not per request) + pass + + def preprocess(self, body: dict) -> Any: + # we expect to get four valid numbers on the dict: x0, x1, x2, x3 + return np.array( + [[body.get("x0", None), body.get("x1", None), body.get("x2", None), body.get("x3", None)], ], + dtype=np.float32 + ) + + def postprocess(self, data: Any) -> dict: + # post process the data returned from the model inference engine + # data is the return value from model.predict we will put it inside a return value as Y + # we pick the most probable class and return the class index (argmax) + return dict(y=int(np.argmax(data)) if isinstance(data, np.ndarray) else data) diff --git a/examples/lightgbm/readme.md b/examples/lightgbm/readme.md new file mode 100644 index 0000000..274dcce --- /dev/null +++ b/examples/lightgbm/readme.md @@ -0,0 +1,42 @@ +# Train and Deploy LightGBM model + +## training mock model + +Run the mock python training code +```bash +python3 train_model.py +``` + +The output will be a model created on the project "serving examples", by the name "train lightgbm model" + +## setting up the serving service + +1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) + +2. Create model endpoint: + +   `clearml-serving --id model add --engine lightgbm --endpoint "test_model_lgbm" --preprocess "preprocess.py" --name "train lightgbm model" --project "serving examples"` +Or auto-update +`clearml-serving --id model auto-update --engine lightgbm --endpoint "test_model_auto" --preprocess "preprocess.py" --name "train lightgbm model" --project "serving examples" --max-versions 2` +Or add Canary endpoint +`clearml-serving --id model canary --endpoint "test_model_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_auto` + +3. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= clearml-serving:latest` + +4. 
Test new endpoint: `curl -X POST "http://127.0.0.1:8080/serve/test_model_lgbm" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2, "x2": 3, "x3": 4}'` + +> **_Notice:_** You can also change the serving service while it is already running! +This includes adding/removing endpoints, adding canary model routing etc. + + +### Running / debugging the serving service manually +Once you have setup the Serving Service Task + +```bash +$ pip3 install -r clearml_serving/serving/requirements.txt +$ CLEARML_SERVING_TASK_ID= PYTHONPATH=$(pwd) python3 -m gunicorn \ + --preload clearml_serving.serving.main:app \ + --workers 4 \ + --worker-class uvicorn.workers.UvicornWorker \ + --bind 0.0.0.0:8080 +``` diff --git a/examples/lightgbm/train_model.py b/examples/lightgbm/train_model.py new file mode 100644 index 0000000..ab378d9 --- /dev/null +++ b/examples/lightgbm/train_model.py @@ -0,0 +1,22 @@ +import lightgbm as lgb +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split + +from clearml import Task + +task = Task.init(project_name="serving examples", task_name="train lightgbm model", output_uri=True) + +iris = load_iris() +y = iris['target'] +X = iris['data'] +X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1) +dtrain = lgb.Dataset(X_train, label=y_train) + +params = { + 'objective': 'multiclass', + 'metric': 'softmax', + 'num_class': 3 +} +lgb_model = lgb.train(params=params, train_set=dtrain) + +lgb_model.save_model("lgbm_model") diff --git a/examples/pytorch/preprocess.py b/examples/pytorch/preprocess.py new file mode 100644 index 0000000..8a80002 --- /dev/null +++ b/examples/pytorch/preprocess.py @@ -0,0 +1,35 @@ +from typing import Any + +import numpy as np +from PIL import Image, ImageOps + + +# Notice Preprocess class Must be named "Preprocess" +from clearml import StorageManager + + +class Preprocess(object): + def __init__(self): + # set internal state, this will be called 
only once. (i.e. not per request) + pass + + def preprocess(self, body: dict) -> Any: + # we expect to get a valid 'url' entry on the dict + url = body.get("url") + if not url: + raise ValueError("'url' entry not provided, expected http/s link to image") + + local_file = StorageManager.get_local_copy(remote_url=url) + image = Image.open(local_file) + image = ImageOps.grayscale(image).resize((28, 28)) + return np.array(image).flatten() + + def postprocess(self, data: Any) -> dict: + # post process the data returned from the model inference engine + # data is the return value from model.predict we will put it inside a return value as Y + if not isinstance(data, np.ndarray): + # this should not happen + return dict(digit=-1) + + # data is returned as probability per class (10 class/digits) + return dict(digit=int(data.flatten().argmax())) diff --git a/examples/pytorch/readme.md b/examples/pytorch/readme.md new file mode 100644 index 0000000..db89495 --- /dev/null +++ b/examples/pytorch/readme.md @@ -0,0 +1,49 @@ +# Train and Deploy PyTorch model with Nvidia Triton Engine + +## training mock model + +Run the mock python training code +```bash +python3 train_pytorch_mnist.py +``` + +The output will be a model created on the project "serving examples", by the name "train pytorch model" +*Notice* Only TorchScript models are supported by Triton server + +## setting up the serving service + +1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) +2. 
Create model endpoint: +`clearml-serving --id model add --engine triton --endpoint "test_model_pytorch" --preprocess "preprocess.py" --name "train pytorch model" --project "serving examples" + --input-size 28 28 1 --input-name "INPUT__0" --input-type float32 + --output-size -1 10 --output-name "OUTPUT__0" --output-type float32 +` +Or auto update +`clearml-serving --id model auto-update --engine triton --endpoint "test_model_pytorch_auto" --preprocess "preprocess.py" --name "train pytorch model" --project "serving examples" --max-versions 2 + --input-size 28 28 1 --input-name "INPUT__0" --input-type float32 + --output-size -1 10 --output-name "OUTPUT__0" --output-type float32 +` +Or add Canary endpoint +`clearml-serving --id model canary --endpoint "test_model_pytorch_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_pytorch_auto` + +3. Run the Triton Engine `docker run -v ~/clearml.conf:/root/clearml.conf -p 8001:8001 -e CLEARML_SERVING_TASK_ID= clearml-serving-triton:latest` +4. Configure the Triton Engine IP on the Serving Service (if running on k8s, the gRPC ingest of the triton container) +`clearml-serving --id config --triton-grpc-server :8001` +5. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= clearml-serving:latest` +6. Test new endpoint: `curl -X POST "http://127.0.0.1:8080/serve/test_model_pytorch" -H "accept: application/json" -H "Content-Type: application/json" -d '{"url": "https://camo.githubusercontent.com/8385ca52c9cba1f6e629eb938ab725ec8c9449f12db81f9a34e18208cd328ce9/687474703a2f2f706574722d6d6172656b2e636f6d2f77702d636f6e74656e742f75706c6f6164732f323031372f30372f6465636f6d707265737365642e6a7067"}'` + +> **_Notice:_** You can also change the serving service while it is already running! +This includes adding/removing endpoints, adding canary model routing etc. 
+ + +### Running / debugging the serving service manually +Once you have setup the Serving Service Task + +```bash +$ pip3 install -r clearml_serving/serving/requirements.txt +$ CLEARML_SERVING_TASK_ID= PYTHONPATH=$(pwd) python3 -m gunicorn \ + --preload clearml_serving.serving.main:app \ + --workers 4 \ + --worker-class uvicorn.workers.UvicornWorker \ + --bind 0.0.0.0:8080 +``` diff --git a/examples/pytorch/requirements.txt b/examples/pytorch/requirements.txt new file mode 100644 index 0000000..878ee74 --- /dev/null +++ b/examples/pytorch/requirements.txt @@ -0,0 +1,5 @@ +torchvision +torch +clearml +Pillow +setuptools<58 diff --git a/examples/pytorch/train_pytorch_mnist.py b/examples/pytorch/train_pytorch_mnist.py new file mode 100644 index 0000000..e03aca6 --- /dev/null +++ b/examples/pytorch/train_pytorch_mnist.py @@ -0,0 +1,142 @@ +# ClearML - Example of pytorch with tensorboard>=v1.14 +# +from __future__ import print_function + +import argparse +import os +from tempfile import gettempdir + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torchvision import datasets, transforms +from torch.autograd import Variable +from torch.utils.tensorboard import SummaryWriter + +from clearml import Task, OutputModel + + +class Net(nn.Module): + + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 10, kernel_size=5) + self.conv2 = nn.Conv2d(10, 20, kernel_size=5) + self.conv2_drop = nn.Dropout2d() + self.fc1 = nn.Linear(320, 50) + self.fc2 = nn.Linear(50, 10) + + def forward(self, x): + x = F.relu(F.max_pool2d(self.conv1(x), 2)) + x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) + x = x.view(-1, 320) + x = F.relu(self.fc1(x)) + x = F.dropout(x, training=self.training) + x = self.fc2(x) + return F.log_softmax(x, dim=1) + + +def train(model, epoch, train_loader, args, optimizer, writer): + model.train() + for batch_idx, (data, target) in enumerate(train_loader): + if args.cuda: + data, 
target = data.cuda(), target.cuda() + data, target = Variable(data), Variable(target) + optimizer.zero_grad() + output = model(data) + loss = F.nll_loss(output, target) + loss.backward() + optimizer.step() + if batch_idx % args.log_interval == 0: + print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + epoch, batch_idx * len(data), len(train_loader.dataset), + 100. * batch_idx / len(train_loader), loss.data.item())) + niter = epoch*len(train_loader)+batch_idx + writer.add_scalar('Train/Loss', loss.data.item(), niter) + + +def test(model, test_loader, args, optimizer, writer): + model.eval() + test_loss = 0 + correct = 0 + for niter, (data, target) in enumerate(test_loader): + if args.cuda: + data, target = data.cuda(), target.cuda() + data, target = Variable(data), Variable(target) + output = model(data) + test_loss += F.nll_loss(output, target, reduction='sum').data.item() # sum up batch loss + pred = output.data.max(1)[1] # get the index of the max log-probability + pred = pred.eq(target.data).cpu().sum() + writer.add_scalar('Test/Loss', pred, niter) + correct += pred + if niter % 100 == 0: + writer.add_image('test', data[0, :, :, :], niter) + + test_loss /= len(test_loader.dataset) + print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( + test_loss, correct, len(test_loader.dataset), + 100. 
* correct / len(test_loader.dataset))) + + +def main(): + # Training settings + parser = argparse.ArgumentParser(description='PyTorch MNIST Example') + parser.add_argument('--batch-size', type=int, default=64, metavar='N', + help='input batch size for training (default: 64)') + parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', + help='input batch size for testing (default: 1000)') + parser.add_argument('--epochs', type=int, default=10, metavar='N', + help='number of epochs to train (default: 10)') + parser.add_argument('--lr', type=float, default=0.01, metavar='LR', + help='learning rate (default: 0.01)') + parser.add_argument('--momentum', type=float, default=0.5, metavar='M', + help='SGD momentum (default: 0.5)') + parser.add_argument('--no-cuda', action='store_true', default=False, + help='disables CUDA training') + parser.add_argument('--seed', type=int, default=1, metavar='S', + help='random seed (default: 1)') + parser.add_argument('--log-interval', type=int, default=10, metavar='N', + help='how many batches to wait before logging training status') + args = parser.parse_args() + + # Connecting ClearML with the current process, + # from here on everything is logged automatically + task = Task.init(project_name='serving examples', task_name='train pytorch model', output_uri=True) # noqa: F841 + writer = SummaryWriter('runs') + writer.add_text('TEXT', 'This is some text', 0) + args.cuda = not args.no_cuda and torch.cuda.is_available() + + torch.manual_seed(args.seed) + if args.cuda: + torch.cuda.manual_seed(args.seed) + + kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {} + train_loader = torch.utils.data.DataLoader(datasets.MNIST('./data', train=True, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,))])), + batch_size=args.batch_size, shuffle=True, **kwargs) + test_loader = torch.utils.data.DataLoader(datasets.MNIST('./data', train=False, + 
transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,))])), + batch_size=args.test_batch_size, shuffle=True, **kwargs) + + model = Net() + if args.cuda: + model.cuda() + + optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum) + + for epoch in range(1, args.epochs + 1): + train(model, epoch, train_loader, args, optimizer, writer) + + # store in a way we can easily load into triton without having to have the model class + torch.jit.script(model).save('serving_model.pt') + OutputModel().update_weights('serving_model.pt') + test(model, test_loader, args, optimizer, writer) + + +if __name__ == "__main__": + main() diff --git a/examples/sklearn/preprocess.py b/examples/sklearn/preprocess.py new file mode 100644 index 0000000..079299a --- /dev/null +++ b/examples/sklearn/preprocess.py @@ -0,0 +1,19 @@ +from typing import Any + +import numpy as np + + +# Notice Preprocess class Must be named "Preprocess" +class Preprocess(object): + def __init__(self): + # set internal state, this will be called only once. (i.e. 
not per request) + pass + + def preprocess(self, body: dict) -> Any: + # we expect to get two valid numbers on the dict x0, and x1 + return [[body.get("x0", None), body.get("x1", None)], ] + + def postprocess(self, data: Any) -> dict: + # post process the data returned from the model inference engine + # data is the return value from model.predict we will put it inside a return value as Y + return dict(y=data.tolist() if isinstance(data, np.ndarray) else data) diff --git a/examples/sklearn/readme.md b/examples/sklearn/readme.md new file mode 100644 index 0000000..9fcdb40 --- /dev/null +++ b/examples/sklearn/readme.md @@ -0,0 +1,39 @@ +# Train and Deploy Scikit-Learn model + +## training mock model + +Run the mock python training code +```bash +python3 train_model.py +``` + +The output will be a model created on the project "serving examples", by the name "train sklearn model" + +## setting up the serving service + +1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) +2. Create model endpoint: +`clearml-serving --id model add --engine sklearn --endpoint "test_model_sklearn" --preprocess "preprocess.py" --name "train sklearn model" --project "serving examples"` +Or auto update +`clearml-serving --id model auto-update --engine sklearn --endpoint "test_model_sklearn_auto" --preprocess "preprocess.py" --name "train sklearn model" --project "serving examples" --max-versions 2` +Or add Canary endpoint +`clearml-serving --id model canary --endpoint "test_model_sklearn_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_sklearn_auto` + +4. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= clearml-serving:latest` +5. 
Test new endpoint: `curl -X POST "http://127.0.0.1:8080/serve/test_model_sklearn" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` + +> **_Notice:_** You can also change the serving service while it is already running! +This includes adding/removing endpoints, adding canary model routing etc. + + +### Running / debugging the serving service manually +Once you have setup the Serving Service Task + +```bash +$ pip3 install -r clearml_serving/serving/requirements.txt +$ CLEARML_SERVING_TASK_ID= PYHTONPATH=$(pwd) python3 -m gunicorn \ + --preload clearml_serving.serving.main:app \ + --workers 4 \ + --worker-class uvicorn.workers.UvicornWorker \ + --bind 0.0.0.0:8080 +``` diff --git a/examples/sklearn/train_model.py b/examples/sklearn/train_model.py new file mode 100644 index 0000000..94edb00 --- /dev/null +++ b/examples/sklearn/train_model.py @@ -0,0 +1,15 @@ +from sklearn.linear_model import LogisticRegression +from sklearn.datasets import make_blobs +from joblib import dump +from clearml import Task + +task = Task.init(project_name="serving examples", task_name="train sklearn model", output_uri=True) + +# generate 2d classification dataset +X, y = make_blobs(n_samples=100, centers=2, n_features=2, random_state=1) +# fit final model +model = LogisticRegression() +model.fit(X, y) + +dump(model, filename="sklearn-model.pkl", compress=9) + diff --git a/examples/xgboost/preprocess.py b/examples/xgboost/preprocess.py new file mode 100644 index 0000000..48acf33 --- /dev/null +++ b/examples/xgboost/preprocess.py @@ -0,0 +1,21 @@ +from typing import Any + +import numpy as np +import xgboost as xgb + + +# Notice Preprocess class Must be named "Preprocess" +class Preprocess(object): + def __init__(self): + # set internal state, this will be called only once. (i.e. 
not per request) + pass + + def preprocess(self, body: dict) -> Any: + # we expect to get four valid numbers on the dict: x0, x1, x2, x3 + return xgb.DMatrix( + [[body.get("x0", None), body.get("x1", None), body.get("x2", None), body.get("x3", None)]]) + + def postprocess(self, data: Any) -> dict: + # post process the data returned from the model inference engine + # data is the return value from model.predict we will put it inside a return value as Y + return dict(y=data.tolist() if isinstance(data, np.ndarray) else data) diff --git a/examples/xgboost/readme.md b/examples/xgboost/readme.md new file mode 100644 index 0000000..faee82a --- /dev/null +++ b/examples/xgboost/readme.md @@ -0,0 +1,40 @@ +# Train and Deploy XGBoost model + +## training mock model + +Run the mock python training code +```bash +python3 train_model.py +``` + +The output will be a model created on the project "serving examples", by the name "train xgboost model" + +## setting up the serving service + +1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) +2. Create model endpoint: + +3. `clearml-serving --id model add --engine xgboost --endpoint "test_model_xgb" --preprocess "preprocess.py" --name "train xgboost model" --project "serving examples"` +Or auto update +`clearml-serving --id model auto-update --engine xgboost --endpoint "test_model_xgb_auto" --preprocess "preprocess.py" --name "train xgboost model" --project "serving examples" --max-versions 2` +Or add Canary endpoint +`clearml-serving --id model canary --endpoint "test_model_xgb_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_xgb_auto` + +4. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= clearml-serving:latest` +5. 
Test new endpoint: `curl -X POST "http://127.0.0.1:8080/serve/test_model_xgb" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2, "x2": 3, "x3": 4}'` + +> **_Notice:_** You can also change the serving service while it is already running! +This includes adding/removing endpoints, adding canary model routing etc. + + +### Running / debugging the serving service manually +Once you have setup the Serving Service Task + +```bash +$ pip3 install -r clearml_serving/serving/requirements.txt +$ CLEARML_SERVING_TASK_ID= PYHTONPATH=$(pwd) python3 -m gunicorn \ + --preload clearml_serving.serving.main:app \ + --workers 4 \ + --worker-class uvicorn.workers.UvicornWorker \ + --bind 0.0.0.0:8080 +``` diff --git a/examples/xgboost/train_model.py b/examples/xgboost/train_model.py new file mode 100644 index 0000000..cb91cf3 --- /dev/null +++ b/examples/xgboost/train_model.py @@ -0,0 +1,28 @@ +import xgboost as xgb +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split + +from clearml import Task + +task = Task.init(project_name="serving examples", task_name="train xgboost model", output_uri=True) + +X, y = load_iris(return_X_y=True) +X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=100 +) + +dtrain = xgb.DMatrix(X_train, label=y_train) +dtest = xgb.DMatrix(X_test, label=y_test) + +params = {"objective": "reg:squarederror", "eval_metric": "rmse"} + + +bst = xgb.train( + params, + dtrain, + num_boost_round=100, + evals=[(dtrain, "train"), (dtest, "test")], + verbose_eval=0, +) + +bst.save_model("xgb_model") diff --git a/requirements.txt b/requirements.txt index 7c60ed3..a37514d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -clearml >= 0.17.6rc1 +clearml >= 1.1.6 From 7052bab2d73673296102baade5a1bfb783890e3c Mon Sep 17 00:00:00 2001 From: Allegro AI <51604379+allegroai-git@users.noreply.github.com> Date: Sun, 6 Mar 2022 01:29:59 +0200 Subject: [PATCH 
02/19] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f325a4c..13781ac 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ Features: * Customizable * Open Source - + ## Installation From 0fb51c42d1c823bf503227af9714746357054fa0 Mon Sep 17 00:00:00 2001 From: Allegro AI <51604379+allegroai-git@users.noreply.github.com> Date: Sun, 6 Mar 2022 01:31:00 +0200 Subject: [PATCH 03/19] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 13781ac..da9d6d3 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ **ClearML Serving - Model deployment made easy** -## **`clearml-serving`
Model Serving (ML/DL), Orchestration and Repository Made Easy** +## **`clearml-serving`
Model Serving (ML/DL) Orchestration and Repository Made Easy** [![GitHub license](https://img.shields.io/github/license/allegroai/clearml-serving.svg)](https://img.shields.io/github/license/allegroai/clearml-serving.svg) From 59c8d343ead061eabf34f8bfc70d06edbe2c770d Mon Sep 17 00:00:00 2001 From: Allegro AI <51604379+allegroai-git@users.noreply.github.com> Date: Sun, 6 Mar 2022 01:39:59 +0200 Subject: [PATCH 04/19] Update README.md --- README.md | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index da9d6d3..7566ff7 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ **ClearML Serving - Model deployment made easy** -## **`clearml-serving`
Model Serving (ML/DL) Orchestration and Repository Made Easy** +## **`clearml-serving`
:sparkles: Model Serving (ML/DL) Made Easy :tada:** [![GitHub license](https://img.shields.io/github/license/allegroai/clearml-serving.svg)](https://img.shields.io/github/license/allegroai/clearml-serving.svg) @@ -26,17 +26,17 @@ Features: * Support Machine Learning Models (Scikit Learn, XGBoost, LightGBM) * Support Deep Learning Models (Tensorflow, PyTorch, ONNX) * Customizable RestAPI for serving (i.e. allow per model pre/post-processing for easy integration) -* Flexibility +* Flexible * On-line model deployment * On-line endpoint model/version deployment (i.e. no need to take the service down) * Per model standalone preprocessing and postprocessing python code -* Scalability +* Scalable * Multi model per container * Multi models per serving service * Multi-service support (fully seperated multiple serving service running independently) * Multi cluster support * Out-of-the-box node auto-scaling based on load/usage -* Efficiency +* Efficient * multi-container resource utilization * Support for CPU & GPU nodes * Auto-batching for DL models @@ -55,11 +55,7 @@ Features: ### ClearML Serving Design Principles -* Modular -* Scalable -* Flexible -* Customizable -* Open Source +**Modular** , **Scalable** , **Flexible** , **Customizable** , **Open Source** @@ -67,13 +63,19 @@ Features: ### Concepts -CLI - Secure configuration interface for on-line model upgrade/deployment on running Serving Services -Serving Service Task - Control plane object storing configuration on all the endpoints. Support multiple separated instance, deployed on multiple clusters. -Inference Services - Inference containers, performing model serving pre/post processing. Also support CPU model inferencing. -Serving Engine Services - Inference engine containers (e.g. Nvidia Triton, TorchServe etc.) used by the Inference Services for heavier model inference. 
-Statistics Service - Single instance per Serving Service collecting and broadcasting model serving & performance statistics -Time-series DB - Statistics collection service used by the Statistics Service, e.g. Prometheus -Dashboard Service - Customizable dashboard-ing solution on top of the collected statistics, e.g. Grafana +**CLI** - Secure configuration interface for on-line model upgrade/deployment on running Serving Services + +**Serving Service Task** - Control plane object storing configuration on all the endpoints. Support multiple separated instance, deployed on multiple clusters. + +**Inference Services** - Inference containers, performing model serving pre/post processing. Also support CPU model inferencing. + +**Serving Engine Services** - Inference engine containers (e.g. Nvidia Triton, TorchServe etc.) used by the Inference Services for heavier model inference. + +**Statistics Service** - Single instance per Serving Service collecting and broadcasting model serving & performance statistics + +**Time-series DB** - Statistics collection service used by the Statistics Service, e.g. Prometheus + +**Dashboards** - Customizable dashboard-ing solution on top of the collected statistics, e.g. Grafana ### prerequisites From 78436106f5ae4134b8b5295d9c2a0cc855708ee7 Mon Sep 17 00:00:00 2001 From: Allegro AI <51604379+allegroai-git@users.noreply.github.com> Date: Sun, 6 Mar 2022 01:45:29 +0200 Subject: [PATCH 05/19] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7566ff7..e2a11f3 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ **ClearML Serving - Model deployment made easy** -## **`clearml-serving`
:sparkles: Model Serving (ML/DL) Made Easy :tada:** +## **`clearml-serving v2.0`
:sparkles: Model Serving (ML/DL) Made Easy :tada:** [![GitHub license](https://img.shields.io/github/license/allegroai/clearml-serving.svg)](https://img.shields.io/github/license/allegroai/clearml-serving.svg) From 451e335ceb047e82dd1be26358660bd0d75c7b5b Mon Sep 17 00:00:00 2001 From: allegroai Date: Sun, 6 Mar 2022 02:05:52 +0200 Subject: [PATCH 06/19] Add missing requirements --- README.md | 2 +- .../engines/triton/requirements.txt | 2 +- clearml_serving/serving/requirements.txt | 4 +++- examples/keras/readme.md | 22 +++++-------------- examples/lightgbm/readme.md | 22 +++++-------------- examples/lightgbm/requirements.txt | 3 +++ examples/pytorch/readme.md | 21 +++++------------- examples/sklearn/readme.md | 22 +++++-------------- examples/sklearn/requirements.txt | 2 ++ examples/xgboost/readme.md | 22 +++++-------------- examples/xgboost/requirements.txt | 3 +++ 11 files changed, 38 insertions(+), 87 deletions(-) create mode 100644 examples/lightgbm/requirements.txt create mode 100644 examples/sklearn/requirements.txt create mode 100644 examples/xgboost/requirements.txt diff --git a/README.md b/README.md index e2a11f3..3678f1a 100644 --- a/README.md +++ b/README.md @@ -248,7 +248,7 @@ Example: - `curl -X POST "http://127.0.0.1:8080/serve/test_model" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` -### Model inference Examples +### Model Serving Examples - Scikit-Learn [example](examples/sklearn/readme.md) - random data - XGBoost [example](examples/xgboost/readme.md) - iris dataset diff --git a/clearml_serving/engines/triton/requirements.txt b/clearml_serving/engines/triton/requirements.txt index aec1e2f..4f45b00 100644 --- a/clearml_serving/engines/triton/requirements.txt +++ b/clearml_serving/engines/triton/requirements.txt @@ -1,6 +1,6 @@ clearml >= 1.1.6 clearml-serving -tritonclient +tritonclient[grpc] grpcio Pillow pathlib2 \ No newline at end of file diff --git a/clearml_serving/serving/requirements.txt 
b/clearml_serving/serving/requirements.txt index 281b0f8..2bacc7a 100644 --- a/clearml_serving/serving/requirements.txt +++ b/clearml_serving/serving/requirements.txt @@ -11,4 +11,6 @@ numpy pandas scikit-learn grpcio -Pillow \ No newline at end of file +Pillow +xgboost +lightgbm diff --git a/examples/keras/readme.md b/examples/keras/readme.md index 185c1ca..96a6a99 100644 --- a/examples/keras/readme.md +++ b/examples/keras/readme.md @@ -1,10 +1,11 @@ # Train and Deploy Keras model with Nvidia Triton Engine -## training mock model +## training mnist digit classifier model Run the mock python training code ```bash -python3 train_keras_mnist.py +pip install -r examples/keras/requirements.txt +python examples/keras/train_keras_mnist.py ``` The output will be a model created on the project "serving examples", by the name "train keras model" @@ -13,10 +14,10 @@ The output will be a model created on the project "serving examples", by the nam 1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) 2. 
Create model endpoint: - `clearml-serving --id model add --engine triton --endpoint "test_model_keras" --preprocess "preprocess.py" --name "train keras model" --project "serving examples" --input-size 1 784 --input-name "dense_input" --input-type float32 --output-size -1 10 --output-name "activation_2" --output-type float32 + `clearml-serving --id model add --engine triton --endpoint "test_model_keras" --preprocess "examples/keras/preprocess.py" --name "train keras model" --project "serving examples" --input-size 1 784 --input-name "dense_input" --input-type float32 --output-size -1 10 --output-name "activation_2" --output-type float32 ` Or auto update -`clearml-serving --id model auto-update --engine triton --endpoint "test_model_auto" --preprocess "preprocess.py" --name "train keras model" --project "serving examples" --max-versions 2 +`clearml-serving --id model auto-update --engine triton --endpoint "test_model_auto" --preprocess "examples/keras/preprocess.py" --name "train keras model" --project "serving examples" --max-versions 2 --input-size 1 784 --input-name "dense_input" --input-type float32 --output-size -1 10 --output-name "activation_2" --output-type float32 ` @@ -31,16 +32,3 @@ Or add Canary endpoint > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. 
- - -### Running / debugging the serving service manually -Once you have setup the Serving Service Task - -```bash -$ pip3 install -r clearml_serving/serving/requirements.txt -$ CLEARML_SERVING_TASK_ID= PYHTONPATH=$(pwd) python3 -m gunicorn \ - --preload clearml_serving.serving.main:app \ - --workers 4 \ - --worker-class uvicorn.workers.UvicornWorker \ - --bind 0.0.0.0:8080 -``` diff --git a/examples/lightgbm/readme.md b/examples/lightgbm/readme.md index 274dcce..3b5656e 100644 --- a/examples/lightgbm/readme.md +++ b/examples/lightgbm/readme.md @@ -1,10 +1,11 @@ # Train and Deploy LightGBM model -## training mock model +## training iris classifier model Run the mock python training code ```bash -python3 train_model.py +pip install -r examples/lightgbm/requirements.txt +python examples/lightgbm/train_model.py ``` The output will be a model created on the project "serving examples", by the name "train lightgbm model" @@ -15,9 +16,9 @@ The output will be a model created on the project "serving examples", by the nam 2. Create model endpoint: -3. `clearml-serving --id model add --engine lightgbm --endpoint "test_model_lgbm" --preprocess "preprocess.py" --name "train lightgbm model" --project "serving examples"` +3. 
`clearml-serving --id model add --engine lightgbm --endpoint "test_model_lgbm" --preprocess "examples/lightgbm/preprocess.py" --name "train lightgbm model" --project "serving examples"` Or auto-update -`clearml-serving --id model auto-update --engine lightgbm --endpoint "test_model_auto" --preprocess "preprocess.py" --name "train lightgbm model" --project "serving examples" --max-versions 2` +`clearml-serving --id model auto-update --engine lightgbm --endpoint "test_model_auto" --preprocess "examples/lightgbm/preprocess.py" --name "train lightgbm model" --project "serving examples" --max-versions 2` Or add Canary endpoint `clearml-serving --id model canary --endpoint "test_model_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_auto` @@ -27,16 +28,3 @@ Or add Canary endpoint > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. - - -### Running / debugging the serving service manually -Once you have setup the Serving Service Task - -```bash -$ pip3 install -r clearml_serving/serving/requirements.txt -$ CLEARML_SERVING_TASK_ID= PYHTONPATH=$(pwd) python3 -m gunicorn \ - --preload clearml_serving.serving.main:app \ - --workers 4 \ - --worker-class uvicorn.workers.UvicornWorker \ - --bind 0.0.0.0:8080 -``` diff --git a/examples/lightgbm/requirements.txt b/examples/lightgbm/requirements.txt new file mode 100644 index 0000000..ddc5c29 --- /dev/null +++ b/examples/lightgbm/requirements.txt @@ -0,0 +1,3 @@ +clearml >= 1.1.6 +lightgbm + diff --git a/examples/pytorch/readme.md b/examples/pytorch/readme.md index db89495..926472c 100644 --- a/examples/pytorch/readme.md +++ b/examples/pytorch/readme.md @@ -1,10 +1,11 @@ # Train and Deploy Keras model with Nvidia Triton Engine -## training mock model +## training mnist digit classifier model Run the mock python training code ```bash -python3 train_pytorch_mnist.py +pip install -r examples/pytorch/requirements.txt 
+python examples/pytorch/train_pytorch_mnist.py ``` The output will be a model created on the project "serving examples", by the name "train pytorch model" @@ -14,12 +15,12 @@ The output will be a model created on the project "serving examples", by the nam 1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) 2. Create model endpoint: -`clearml-serving --id model add --engine triton --endpoint "test_model_pytorch" --preprocess "preprocess.py" --name "train pytorch model" --project "serving examples" +`clearml-serving --id model add --engine triton --endpoint "test_model_pytorch" --preprocess "examples/pytorch/preprocess.py" --name "train pytorch model" --project "serving examples" --input-size 28 28 1 --input-name "INPUT__0" --input-type float32 --output-size -1 10 --output-name "OUTPUT__0" --output-type float32 ` Or auto update -`clearml-serving --id model auto-update --engine triton --endpoint "test_model_pytorch_auto" --preprocess "preprocess.py" --name "train pytorch model" --project "serving examples" --max-versions 2 +`clearml-serving --id model auto-update --engine triton --endpoint "test_model_pytorch_auto" --preprocess "examples/pytorch/preprocess.py" --name "train pytorch model" --project "serving examples" --max-versions 2 --input-size 28 28 1 --input-name "INPUT__0" --input-type float32 --output-size -1 10 --output-name "OUTPUT__0" --output-type float32 ` @@ -35,15 +36,3 @@ Or add Canary endpoint > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. 
- -### Running / debugging the serving service manually -Once you have setup the Serving Service Task - -```bash -$ pip3 install -r clearml_serving/serving/requirements.txt -$ CLEARML_SERVING_TASK_ID= PYHTONPATH=$(pwd) python3 -m gunicorn \ - --preload clearml_serving.serving.main:app \ - --workers 4 \ - --worker-class uvicorn.workers.UvicornWorker \ - --bind 0.0.0.0:8080 -``` diff --git a/examples/sklearn/readme.md b/examples/sklearn/readme.md index 9fcdb40..ae4908a 100644 --- a/examples/sklearn/readme.md +++ b/examples/sklearn/readme.md @@ -1,10 +1,11 @@ # Train and Deploy Scikit-Learn model -## training mock model +## training mock logistic regression model Run the mock python training code ```bash -python3 train_model.py +pip install -r examples/sklearn/requirements.txt +python examples/sklearn/train_model.py ``` The output will be a model created on the project "serving examples", by the name "train sklearn model" @@ -13,9 +14,9 @@ The output will be a model created on the project "serving examples", by the nam 1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) 2. 
Create model endpoint: -`clearml-serving --id model add --engine sklearn --endpoint "test_model_sklearn" --preprocess "preprocess.py" --name "train sklearn model" --project "serving examples"` +`clearml-serving --id model add --engine sklearn --endpoint "test_model_sklearn" --preprocess "examples/sklearn/preprocess.py" --name "train sklearn model" --project "serving examples"` Or auto update -`clearml-serving --id model auto-update --engine sklearn --endpoint "test_model_sklearn_auto" --preprocess "preprocess.py" --name "train sklearn model" --project "serving examples" --max-versions 2` +`clearml-serving --id model auto-update --engine sklearn --endpoint "test_model_sklearn_auto" --preprocess "examples/sklearn/preprocess.py" --name "train sklearn model" --project "serving examples" --max-versions 2` Or add Canary endpoint `clearml-serving --id model canary --endpoint "test_model_sklearn_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_sklearn_auto` @@ -24,16 +25,3 @@ Or add Canary endpoint > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. 
- - -### Running / debugging the serving service manually -Once you have setup the Serving Service Task - -```bash -$ pip3 install -r clearml_serving/serving/requirements.txt -$ CLEARML_SERVING_TASK_ID= PYHTONPATH=$(pwd) python3 -m gunicorn \ - --preload clearml_serving.serving.main:app \ - --workers 4 \ - --worker-class uvicorn.workers.UvicornWorker \ - --bind 0.0.0.0:8080 -``` diff --git a/examples/sklearn/requirements.txt b/examples/sklearn/requirements.txt new file mode 100644 index 0000000..eb862f7 --- /dev/null +++ b/examples/sklearn/requirements.txt @@ -0,0 +1,2 @@ +clearml >= 1.1.6 +scikit-learn diff --git a/examples/xgboost/readme.md b/examples/xgboost/readme.md index faee82a..00b054f 100644 --- a/examples/xgboost/readme.md +++ b/examples/xgboost/readme.md @@ -1,10 +1,11 @@ # Train and Deploy XGBoost model -## training mock model +## training iris classifier model Run the mock python training code ```bash -python3 train_model.py +pip install -r examples/xgboost/requirements.txt +python examples/xgboost/train_model.py ``` The output will be a model created on the project "serving examples", by the name "train xgboost model" @@ -14,9 +15,9 @@ The output will be a model created on the project "serving examples", by the nam 1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) 2. Create model endpoint: -3. `clearml-serving --id model add --engine xgboost --endpoint "test_model_xgb" --preprocess "preprocess.py" --name "train xgboost model" --project "serving examples"` +3. 
`clearml-serving --id model add --engine xgboost --endpoint "test_model_xgb" --preprocess "examples/xgboost/preprocess.py" --name "train xgboost model" --project "serving examples"` Or auto update -`clearml-serving --id model auto-update --engine xgboost --endpoint "test_model_xgb_auto" --preprocess "preprocess.py" --name "train xgboost model" --project "serving examples" --max-versions 2` +`clearml-serving --id model auto-update --engine xgboost --endpoint "test_model_xgb_auto" --preprocess "examples/xgboost/preprocess.py" --name "train xgboost model" --project "serving examples" --max-versions 2` Or add Canary endpoint `clearml-serving --id model canary --endpoint "test_model_xgb_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_xgb_auto` @@ -25,16 +26,3 @@ Or add Canary endpoint > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. - - -### Running / debugging the serving service manually -Once you have setup the Serving Service Task - -```bash -$ pip3 install -r clearml_serving/serving/requirements.txt -$ CLEARML_SERVING_TASK_ID= PYHTONPATH=$(pwd) python3 -m gunicorn \ - --preload clearml_serving.serving.main:app \ - --workers 4 \ - --worker-class uvicorn.workers.UvicornWorker \ - --bind 0.0.0.0:8080 -``` diff --git a/examples/xgboost/requirements.txt b/examples/xgboost/requirements.txt new file mode 100644 index 0000000..0b0fe4b --- /dev/null +++ b/examples/xgboost/requirements.txt @@ -0,0 +1,3 @@ +clearml >= 1.1.6 +xgboost + From 3560159de0b4b8bb5687edc5b38991d5a79ce17b Mon Sep 17 00:00:00 2001 From: Allegro AI <51604379+allegroai-git@users.noreply.github.com> Date: Sun, 6 Mar 2022 02:10:30 +0200 Subject: [PATCH 07/19] Update readme.md --- examples/sklearn/readme.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/sklearn/readme.md b/examples/sklearn/readme.md index ae4908a..7e7acf7 100644 --- 
a/examples/sklearn/readme.md +++ b/examples/sklearn/readme.md @@ -15,13 +15,17 @@ The output will be a model created on the project "serving examples", by the nam 1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) 2. Create model endpoint: `clearml-serving --id model add --engine sklearn --endpoint "test_model_sklearn" --preprocess "examples/sklearn/preprocess.py" --name "train sklearn model" --project "serving examples"` + Or auto update + `clearml-serving --id model auto-update --engine sklearn --endpoint "test_model_sklearn_auto" --preprocess "examples/sklearn/preprocess.py" --name "train sklearn model" --project "serving examples" --max-versions 2` + Or add Canary endpoint + `clearml-serving --id model canary --endpoint "test_model_sklearn_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_sklearn_auto` -4. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= clearml-serving:latest` -5. Test new endpoint: `curl -X POST "http://127.0.0.1:8080/serve/test_model_sklearn" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` +3. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= clearml-serving:latest` +4. Test new endpoint: `curl -X POST "http://127.0.0.1:8080/serve/test_model_sklearn" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. 
From b35100568a836e1c8f3a7d2263129c4df87e5117 Mon Sep 17 00:00:00 2001 From: Allegro AI <51604379+allegroai-git@users.noreply.github.com> Date: Sun, 6 Mar 2022 02:10:55 +0200 Subject: [PATCH 08/19] Update readme.md --- examples/xgboost/readme.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/examples/xgboost/readme.md b/examples/xgboost/readme.md index 00b054f..52d39ca 100644 --- a/examples/xgboost/readme.md +++ b/examples/xgboost/readme.md @@ -16,9 +16,13 @@ The output will be a model created on the project "serving examples", by the nam 2. Create model endpoint: 3. `clearml-serving --id model add --engine xgboost --endpoint "test_model_xgb" --preprocess "examples/xgboost/preprocess.py" --name "train xgboost model" --project "serving examples"` + Or auto update + `clearml-serving --id model auto-update --engine xgboost --endpoint "test_model_xgb_auto" --preprocess "examples/xgboost/preprocess.py" --name "train xgboost model" --project "serving examples" --max-versions 2` + Or add Canary endpoint + `clearml-serving --id model canary --endpoint "test_model_xgb_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_xgb_auto` 4. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= clearml-serving:latest` From a49fa0321e8cbdd95976ad4e1a3b1b27daa9757e Mon Sep 17 00:00:00 2001 From: Allegro AI <51604379+allegroai-git@users.noreply.github.com> Date: Sun, 6 Mar 2022 02:12:02 +0200 Subject: [PATCH 09/19] Update readme.md --- examples/pytorch/readme.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/pytorch/readme.md b/examples/pytorch/readme.md index 926472c..2ea4a26 100644 --- a/examples/pytorch/readme.md +++ b/examples/pytorch/readme.md @@ -15,16 +15,20 @@ The output will be a model created on the project "serving examples", by the nam 1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) 2. 
Create model endpoint: + `clearml-serving --id model add --engine triton --endpoint "test_model_pytorch" --preprocess "examples/pytorch/preprocess.py" --name "train pytorch model" --project "serving examples" --input-size 28 28 1 --input-name "INPUT__0" --input-type float32 --output-size -1 10 --output-name "OUTPUT__0" --output-type float32 ` + Or auto update + `clearml-serving --id model auto-update --engine triton --endpoint "test_model_pytorch_auto" --preprocess "examples/pytorch/preprocess.py" --name "train pytorch model" --project "serving examples" --max-versions 2 --input-size 28 28 1 --input-name "INPUT__0" --input-type float32 - --output-size -1 10 --output-name "OUTPUT__0" --output-type float32 -` + --output-size -1 10 --output-name "OUTPUT__0" --output-type float32` + Or add Canary endpoint + `clearml-serving --id model canary --endpoint "test_model_pytorch_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_pytorch_auto` 3. Run the Triton Engine `docker run -v ~/clearml.conf:/root/clearml.conf -p 8001:8001 -e CLEARML_SERVING_TASK_ID= clearml-serving-triton:latest` From f2f0c07c387ee58142883e2c864402bf28ea9637 Mon Sep 17 00:00:00 2001 From: Allegro AI <51604379+allegroai-git@users.noreply.github.com> Date: Sun, 6 Mar 2022 02:12:33 +0200 Subject: [PATCH 10/19] Update readme.md --- examples/lightgbm/readme.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/examples/lightgbm/readme.md b/examples/lightgbm/readme.md index 3b5656e..701c409 100644 --- a/examples/lightgbm/readme.md +++ b/examples/lightgbm/readme.md @@ -16,15 +16,19 @@ The output will be a model created on the project "serving examples", by the nam 2. Create model endpoint: -3. 
`clearml-serving --id model add --engine lightgbm --endpoint "test_model_lgbm" --preprocess "examples/lightgbm/preprocess.py" --name "train lightgbm model" --project "serving examples"` +`clearml-serving --id model add --engine lightgbm --endpoint "test_model_lgbm" --preprocess "examples/lightgbm/preprocess.py" --name "train lightgbm model" --project "serving examples"` + Or auto-update + `clearml-serving --id model auto-update --engine lightgbm --endpoint "test_model_auto" --preprocess "examples/lightgbm/preprocess.py" --name "train lightgbm model" --project "serving examples" --max-versions 2` + Or add Canary endpoint + `clearml-serving --id model canary --endpoint "test_model_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_auto` -4. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= clearml-serving:latest` +3. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= clearml-serving:latest` -5. Test new endpoint: `curl -X POST "http://127.0.0.1:8080/serve/test_model_lgbm" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2, "x2": 3, "x3": 4}'` +4. Test new endpoint: `curl -X POST "http://127.0.0.1:8080/serve/test_model_lgbm" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2, "x2": 3, "x3": 4}'` > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. 
From d84829bb8aadd93b934c78add255c86295e2ccf2 Mon Sep 17 00:00:00 2001 From: Allegro AI <51604379+allegroai-git@users.noreply.github.com> Date: Sun, 6 Mar 2022 02:14:33 +0200 Subject: [PATCH 11/19] Update readme.md --- examples/keras/readme.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/keras/readme.md b/examples/keras/readme.md index 96a6a99..f8a6904 100644 --- a/examples/keras/readme.md +++ b/examples/keras/readme.md @@ -14,14 +14,18 @@ The output will be a model created on the project "serving examples", by the nam 1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) 2. Create model endpoint: + `clearml-serving --id model add --engine triton --endpoint "test_model_keras" --preprocess "examples/keras/preprocess.py" --name "train keras model" --project "serving examples" --input-size 1 784 --input-name "dense_input" --input-type float32 --output-size -1 10 --output-name "activation_2" --output-type float32 ` + Or auto update + `clearml-serving --id model auto-update --engine triton --endpoint "test_model_auto" --preprocess "examples/keras/preprocess.py" --name "train keras model" --project "serving examples" --max-versions 2 --input-size 1 784 --input-name "dense_input" --input-type float32 - --output-size -1 10 --output-name "activation_2" --output-type float32 -` + --output-size -1 10 --output-name "activation_2" --output-type float32` + Or add Canary endpoint + `clearml-serving --id model canary --endpoint "test_model_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_auto` 3. 
Run the Triton Engine `docker run -v ~/clearml.conf:/root/clearml.conf -p 8001:8001 -e CLEARML_SERVING_TASK_ID= clearml-serving-triton:latest` From a7eb36383c9c2ba93124b52d9f3860b472f0e4a1 Mon Sep 17 00:00:00 2001 From: Allegro AI <51604379+allegroai-git@users.noreply.github.com> Date: Sun, 6 Mar 2022 02:22:11 +0200 Subject: [PATCH 12/19] Update README.md --- README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 3678f1a..1bf268e 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ Features: ## Installation -### Concepts +### :information_desk_person: Concepts **CLI** - Secure configuration interface for on-line model upgrade/deployment on running Serving Services @@ -84,7 +84,7 @@ Features: * CLI : Configuration & model deployment interface -### Initial Setup +### :nail_care: Initial Setup 1. Setup your [**ClearML Server**](https://github.com/allegroai/clearml-server) or use the [Free tier Hosting](https://app.community.clear.ml) 2. Install the CLI on your laptop `clearml` and `clearml-serving` @@ -95,7 +95,7 @@ Features: - The new serving service UID should be printed `"New Serving Service created: id=aa11bb22aa11bb22` 4. Write down the Serving Service UID -### Toy model (scikit learn) deployment example +### :point_right: Toy model (scikit learn) deployment example 1. Train toy scikit-learn model - create new python virtual environment @@ -124,7 +124,7 @@ Features: > To learn more on training models and the ClearML model repository, see the [ClearML documentation](https://clear.ml/docs) -### Nvidia Triton serving engine setup +### :muscle: Nvidia Triton serving engine setup Nvidia Triton Serving Engine is used by clearml-serving to do the heavy lifting of deep-learning models on both GPU & CPU nodes. Inside the Triton container a clearml controller is spinning and monitoring the Triton server. 
@@ -151,7 +151,7 @@ Now eny model that will register with "Triton" engine, will run the pre/post pro See Tensorflow [example](examples/keras/readme.md) and Pytorch [example](examples/pytorch/readme.md) for further details. -### Container Configuration Variables +### :ocean: Container Configuration Variables When spinning the Inference container or the Triton Engine container, we need to specify the `clearml-server` address and access credentials @@ -178,7 +178,7 @@ AZURE_STORAGE_ACCOUNT AZURE_STORAGE_KEY ``` -### Registering & Deploying new models manually +### :turtle: Registering & Deploying new models manually Uploading an existing model file into the model repository can be done via the `clearml` RestAPI, the python interface, or with the `clearml-serving` CLI @@ -196,7 +196,7 @@ Uploading an existing model file into the model repository can be done via the ` `--destination="s3://bucket/folder"`, `gs://bucket/folder`, `azure://bucket/folder`. Yhere is no need to provide a unique path tp the destination argument, the location of the model will be a unique path based on the serving service ID and the model name -### Automatic model deployment +### :rabbit: Automatic model deployment The clearml Serving Service support automatic model deployment and upgrades, directly connected with the model repository and API. When the model auto-deploy is configured, a new model versions will be automatically deployed when you "publish" or "tag" a new model in the `clearml` model repository. This automation interface allows for simpler CI/CD model deployment process, as a single API automatically deploy (or remove) a model from the Serving Service. @@ -214,9 +214,9 @@ from clearml import Model Model(model_id="unique_model_id_here").publish() ``` 4. 
The new model is available on a new endpoint version (1), test with: -`curl -X POST "http://127.0.0.1:8080/serve/test_model_sklearn_auto/1" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}' +`curl -X POST "http://127.0.0.1:8080/serve/test_model_sklearn_auto/1" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` -### Canary endpoint setup +### :bird: Canary endpoint setup Canary endpoint deployment add a new endpoint where the actual request is sent to a preconfigured set of endpoints with pre-provided distribution. For example, let's create a new endpoint "test_model_sklearn_canary", we can provide a list of endpoints and probabilities (weights). @@ -248,7 +248,7 @@ Example: - `curl -X POST "http://127.0.0.1:8080/serve/test_model" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` -### Model Serving Examples +### :fire: Model Serving Examples - Scikit-Learn [example](examples/sklearn/readme.md) - random data - XGBoost [example](examples/xgboost/readme.md) - iris dataset @@ -256,7 +256,7 @@ Example: - PyTorch [example](examples/pytorch/readme.md) - mnist dataset - TensorFlow/Keras [example](examples/keras/readme.md) - mnist dataset -### Status +### :pray: Status - [x] FastAPI integration for inference service - [x] multi-process Gunicorn for inference service From 34e5a0b2c80ffaa541f5531bcd98a6b3c21369fd Mon Sep 17 00:00:00 2001 From: allegroai Date: Sun, 6 Mar 2022 03:14:06 +0200 Subject: [PATCH 13/19] Fix routing --- clearml_serving/serving/main.py | 3 +++ examples/pytorch/readme.md | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/clearml_serving/serving/main.py b/clearml_serving/serving/main.py index 96f8127..2b67327 100644 --- a/clearml_serving/serving/main.py +++ b/clearml_serving/serving/main.py @@ -79,6 +79,9 @@ router = APIRouter( ) +# cover all routing options for model version `/{model_id}`, `/{model_id}/123`, 
`/{model_id}?version=123` +@router.post("/{model_id}/{version}") +@router.post("/{model_id}/") @router.post("/{model_id}") async def serve_model(model_id: str, version: Optional[str] = None, request: Dict[Any, Any] = None): try: diff --git a/examples/pytorch/readme.md b/examples/pytorch/readme.md index 2ea4a26..13d4579 100644 --- a/examples/pytorch/readme.md +++ b/examples/pytorch/readme.md @@ -17,14 +17,14 @@ The output will be a model created on the project "serving examples", by the nam 2. Create model endpoint: `clearml-serving --id model add --engine triton --endpoint "test_model_pytorch" --preprocess "examples/pytorch/preprocess.py" --name "train pytorch model" --project "serving examples" - --input-size 28 28 1 --input-name "INPUT__0" --input-type float32 + --input-size 1 28 28 --input-name "INPUT__0" --input-type float32 --output-size -1 10 --output-name "OUTPUT__0" --output-type float32 ` Or auto update `clearml-serving --id model auto-update --engine triton --endpoint "test_model_pytorch_auto" --preprocess "examples/pytorch/preprocess.py" --name "train pytorch model" --project "serving examples" --max-versions 2 - --input-size 28 28 1 --input-name "INPUT__0" --input-type float32 + --input-size 1 28 28 --input-name "INPUT__0" --input-type float32 --output-size -1 10 --output-name "OUTPUT__0" --output-type float32` Or add Canary endpoint From d684169367c98ddaa761e9c6a1f0732a56283baa Mon Sep 17 00:00:00 2001 From: allegroai Date: Wed, 9 Mar 2022 04:02:03 +0200 Subject: [PATCH 14/19] Add model ensemble and model pipelines support --- README.md | 6 +- clearml_serving/__main__.py | 15 +-- .../engines/triton/triton_helper.py | 12 ++- clearml_serving/preprocess/example.py | 40 -------- .../preprocess/preprocess_template.py | 66 +++++++++++++ clearml_serving/serving/endpoints.py | 75 ++++++++++++++ clearml_serving/serving/main.py | 4 +- .../serving/model_request_processor.py | 38 ++++--- clearml_serving/serving/preprocess_service.py | 98 +++++++------------ 
clearml_serving/serving/requirements.txt | 1 + examples/ensemble/preprocess.py | 19 ++++ examples/ensemble/readme.md | 31 ++++++ examples/ensemble/requirements.txt | 2 + examples/ensemble/train_ensemble.py | 23 +++++ examples/pipeline/preprocess.py | 32 ++++++ examples/pipeline/readme.md | 26 +++++ 16 files changed, 346 insertions(+), 142 deletions(-) delete mode 100644 clearml_serving/preprocess/example.py create mode 100644 clearml_serving/preprocess/preprocess_template.py create mode 100644 clearml_serving/serving/endpoints.py create mode 100644 examples/ensemble/preprocess.py create mode 100644 examples/ensemble/readme.md create mode 100644 examples/ensemble/requirements.txt create mode 100644 examples/ensemble/train_ensemble.py create mode 100644 examples/pipeline/preprocess.py create mode 100644 examples/pipeline/readme.md diff --git a/README.md b/README.md index 1bf268e..e5a0fd8 100644 --- a/README.md +++ b/README.md @@ -251,10 +251,12 @@ Example: ### :fire: Model Serving Examples - Scikit-Learn [example](examples/sklearn/readme.md) - random data +- Scikit-Learn Model Ensemble [example](examples/ensemble/readme.md) - random data - XGBoost [example](examples/xgboost/readme.md) - iris dataset - LightGBM [example](examples/lightgbm/readme.md) - iris dataset - PyTorch [example](examples/pytorch/readme.md) - mnist dataset - TensorFlow/Keras [example](examples/keras/readme.md) - mnist dataset +- Model Pipeline [example](examples/pipeline/readme.md) - random data ### :pray: Status @@ -279,8 +281,8 @@ Example: - [x] LightGBM example - [x] PyTorch example - [x] TensorFlow/Keras example - - [ ] Model ensemble example - - [ ] Model pipeline example + - [x] Model ensemble example + - [x] Model pipeline example - [ ] Statistics Service - [ ] Kafka install instructions - [ ] Prometheus install instructions diff --git a/clearml_serving/__main__.py b/clearml_serving/__main__.py index af41569..2a50347 100644 --- a/clearml_serving/__main__.py +++ b/clearml_serving/__main__.py 
@@ -4,7 +4,7 @@ from argparse import ArgumentParser from pathlib import Path from clearml_serving.serving.model_request_processor import ModelRequestProcessor, CanaryEP -from clearml_serving.serving.preprocess_service import ModelMonitoring, ModelEndpoint +from clearml_serving.serving.endpoints import ModelMonitoring, ModelEndpoint verbosity = False @@ -92,8 +92,8 @@ def func_model_remove(args): elif request_processor.remove_canary_endpoint(endpoint_url=args.endpoint): print("Removing model canary endpoint: {}".format(args.endpoint)) else: - print("Error: Could not find base endpoint URL: {}".format(args.endpoint)) - return + raise ValueError("Could not find base endpoint URL: {}".format(args.endpoint)) + print("Updating serving service") request_processor.serialize() @@ -111,8 +111,7 @@ def func_canary_add(args): load_endpoint_prefix=args.input_endpoint_prefix, ) ): - print("Error: Could not add canary endpoint URL: {}".format(args.endpoint)) - return + raise ValueError("Could not add canary endpoint URL: {}".format(args.endpoint)) print("Updating serving service") request_processor.serialize() @@ -152,7 +151,8 @@ def func_model_auto_update_add(args): ), preprocess_code=args.preprocess ): - print("Error: Could not find base endpoint URL: {}".format(args.endpoint)) + raise ValueError("Could not find base endpoint URL: {}".format(args.endpoint)) + print("Updating serving service") request_processor.serialize() @@ -192,7 +192,8 @@ def func_model_endpoint_add(args): model_tags=args.tags or None, model_published=args.published, ): - print("Error: Could not find base endpoint URL: {}".format(args.endpoint)) + raise ValueError("Could not find base endpoint URL: {}".format(args.endpoint)) + print("Updating serving service") request_processor.serialize() diff --git a/clearml_serving/engines/triton/triton_helper.py b/clearml_serving/engines/triton/triton_helper.py index 5371a19..cf31957 100644 --- a/clearml_serving/engines/triton/triton_helper.py +++ 
b/clearml_serving/engines/triton/triton_helper.py @@ -2,17 +2,18 @@ import os import re import shutil import subprocess -import numpy as np from argparse import ArgumentParser from time import time from typing import Optional -from pathlib2 import Path - +import numpy as np from clearml import Task, Logger, InputModel from clearml.backend_api.utils import get_http_session_with_retry -from clearml_serving.serving.model_request_processor import ModelRequestProcessor, ModelEndpoint from clearml.utilities.pyhocon import ConfigFactory, ConfigTree, HOCONConverter +from pathlib2 import Path + +from clearml_serving.serving.endpoints import ModelEndpoint +from clearml_serving.serving.model_request_processor import ModelRequestProcessor class TritonHelper(object): @@ -268,6 +269,7 @@ class TritonHelper(object): Full spec available here: https://github.com/triton-inference-server/server/blob/main/docs/model_configuration.md """ + def _convert_lists(config): if isinstance(config, list): return [_convert_lists(i) for i in config] @@ -346,7 +348,7 @@ class TritonHelper(object): if config_dict: config_dict = _convert_lists(config_dict) # Convert HOCON standard to predefined message format - config_pbtxt = "\n" + HOCONConverter.to_hocon(config_dict).\ + config_pbtxt = "\n" + HOCONConverter.to_hocon(config_dict). \ replace("=", ":").replace(" : ", ": ") # conform types (remove string quotes) if input_type: diff --git a/clearml_serving/preprocess/example.py b/clearml_serving/preprocess/example.py deleted file mode 100644 index b09cfdd..0000000 --- a/clearml_serving/preprocess/example.py +++ /dev/null @@ -1,40 +0,0 @@ -from typing import Any, Optional - -import numpy as np - - -# Notice Preprocess class Must be named "Preprocess" -class Preprocess(object): - serving_config = None - # example: { - # 'base_serving_url': 'http://127.0.0.1:8080/serve/', - # 'triton_grpc_server': '127.0.0.1:9001', - # }" - - def __init__(self): - # set internal state, this will be called only once. (i.e. 
not per request) - pass - - def load(self, local_file_name: str) -> Optional[Any]: - """ - Optional, provide loading method for the model - useful if we need to load a model in a specific way for the prediction engine to work - :param local_file_name: file name / path to read load the model from - :return: Object that will be called with .predict() method for inference - """ - pass - - def preprocess(self, body: dict) -> Any: - # do something with the request data, return any type of object. - # The returned object will be passed as is to the inference engine - return body - - def postprocess(self, data: Any) -> dict: - # post process the data returned from the model inference engine - # returned dict will be passed back as the request result as is. - return data - - def process(self, data: Any) -> Any: - # do something with the actual data, return any type of object. - # The returned object will be passed as is to the postprocess function engine - return data diff --git a/clearml_serving/preprocess/preprocess_template.py b/clearml_serving/preprocess/preprocess_template.py new file mode 100644 index 0000000..769ca5f --- /dev/null +++ b/clearml_serving/preprocess/preprocess_template.py @@ -0,0 +1,66 @@ +from typing import Any, Optional + + +# Notice Preprocess class Must be named "Preprocess" +# Otherwise there are No limitations, No need to inherit or to implement all methods +class Preprocess(object): + serving_config = None + # example: { + # 'base_serving_url': 'http://127.0.0.1:8080/serve/', + # 'triton_grpc_server': '127.0.0.1:9001', + # }" + + def __init__(self): + # set internal state, this will be called only once. (i.e. 
not per request) + pass + + def load(self, local_file_name: str) -> Optional[Any]: # noqa + """ + Optional, provide loading method for the model + useful if we need to load a model in a specific way for the prediction engine to work + :param local_file_name: file name / path to load the model from + :return: Object that will be called with .predict() method for inference + """ + pass + + def preprocess(self, body: dict) -> Any: # noqa + """ + do something with the request data, return any type of object. + The returned object will be passed as is to the inference engine + """ + return body + + def postprocess(self, data: Any) -> dict: # noqa + """ + post process the data returned from the model inference engine + returned dict will be passed back as the request result as is. + """ + return data + + def process(self, data: Any) -> Any: # noqa + """ + do something with the actual data, return any type of object. + The returned object will be passed as is to the postprocess function engine + """ + return data + + def send_request( # noqa + self, + endpoint: str, + version: Optional[str] = None, + data: Optional[dict] = None + ) -> Optional[dict]: + """ + NOTICE: This method will be replaced in runtime, by the inference service + + Helper method to send model inference requests to the inference service itself. + This is designed to help with model ensemble, model pipelines, etc. 
+ On request error return None, otherwise the request result data dictionary + + Usage example: + + >>> x0, x1 = 1, 2 + >>> result = self.send_request(endpoint="test_model_sklearn", version="1", data={"x0": x0, "x1": x1}) + >>> y = result["y"] + """ + return None diff --git a/clearml_serving/serving/endpoints.py b/clearml_serving/serving/endpoints.py new file mode 100644 index 0000000..e1cd9d4 --- /dev/null +++ b/clearml_serving/serving/endpoints.py @@ -0,0 +1,75 @@ +import numpy as np +from attr import attrib, attrs, asdict + + +def _engine_validator(inst, attr, value): # noqa + from .preprocess_service import BasePreprocessRequest + if not BasePreprocessRequest.validate_engine_type(value): + raise TypeError("{} not supported engine type".format(value)) + + +def _matrix_type_validator(inst, attr, value): # noqa + if value and not np.dtype(value): + raise TypeError("{} not supported matrix type".format(value)) + + +@attrs +class ModelMonitoring(object): + base_serving_url = attrib(type=str) # serving point url prefix (example: "detect_cat") + engine_type = attrib(type=str, validator=_engine_validator) # engine type + monitor_project = attrib(type=str, default=None) # monitor model project (for model auto update) + monitor_name = attrib(type=str, default=None) # monitor model name (for model auto update, regexp selection) + monitor_tags = attrib(type=list, default=[]) # monitor model tag (for model auto update) + only_published = attrib(type=bool, default=False) # only select published models + max_versions = attrib(type=int, default=None) # Maximum number of models to keep serving (latest X models) + input_size = attrib(type=list, default=None) # optional, model matrix size + input_type = attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type + input_name = attrib(type=str, default=None) # optional, layer name to push the input to + output_size = attrib(type=list, default=None) # optional, model matrix size + output_type = 
attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type + output_name = attrib(type=str, default=None) # optional, layer name to pull the results from + preprocess_artifact = attrib( + type=str, default=None) # optional artifact name storing the model preprocessing code + auxiliary_cfg = attrib(type=dict, default=None) # Auxiliary configuration (e.g. triton conf), Union[str, dict] + + def as_dict(self, remove_null_entries=False): + if not remove_null_entries: + return asdict(self) + return {k: v for k, v in asdict(self).items() if v is not None} + + +@attrs +class ModelEndpoint(object): + engine_type = attrib(type=str, validator=_engine_validator) # engine type + serving_url = attrib(type=str) # full serving point url (including version) example: "detect_cat/v1" + model_id = attrib(type=str, default=None) # model ID to serve (and download) + version = attrib(type=str, default="") # key (version string), default no version + preprocess_artifact = attrib( + type=str, default=None) # optional artifact name storing the model preprocessing code + input_size = attrib(type=list, default=None) # optional, model matrix size + input_type = attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type + input_name = attrib(type=str, default=None) # optional, layer name to push the input to + output_size = attrib(type=list, default=None) # optional, model matrix size + output_type = attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type + output_name = attrib(type=str, default=None) # optional, layer name to pull the results from + auxiliary_cfg = attrib(type=dict, default=None) # Optional: Auxiliary configuration (e.g. 
triton conf), [str, dict] + + def as_dict(self, remove_null_entries=False): + if not remove_null_entries: + return asdict(self) + return {k: v for k, v in asdict(self).items() if v is not None} + + +@attrs +class CanaryEP(object): + endpoint = attrib(type=str) # load balancer endpoint + weights = attrib(type=list) # list of weights (order should be matching fixed_endpoints or prefix) + load_endpoints = attrib(type=list, default=[]) # list of endpoints to balance and route + load_endpoint_prefix = attrib( + type=str, default=None) # endpoint prefix to list + # (any endpoint starting with this prefix will be listed, sorted lexicographically, or broken into /) + + def as_dict(self, remove_null_entries=False): + if not remove_null_entries: + return asdict(self) + return {k: v for k, v in asdict(self).items() if v is not None} diff --git a/clearml_serving/serving/main.py b/clearml_serving/serving/main.py index 2b67327..2083ebd 100644 --- a/clearml_serving/serving/main.py +++ b/clearml_serving/serving/main.py @@ -19,7 +19,7 @@ class GzipRequest(Request): body = await super().body() if "gzip" in self.headers.getlist("Content-Encoding"): body = gzip.decompress(body) - self._body = body + self._body = body # noqa return self._body @@ -83,7 +83,7 @@ router = APIRouter( @router.post("/{model_id}/{version}") @router.post("/{model_id}/") @router.post("/{model_id}") -async def serve_model(model_id: str, version: Optional[str] = None, request: Dict[Any, Any] = None): +def serve_model(model_id: str, version: Optional[str] = None, request: Dict[Any, Any] = None): try: return_value = processor.process_request( base_url=model_id, diff --git a/clearml_serving/serving/model_request_processor.py b/clearml_serving/serving/model_request_processor.py index ef25347..0186ebd 100644 --- a/clearml_serving/serving/model_request_processor.py +++ b/clearml_serving/serving/model_request_processor.py @@ -8,25 +8,10 @@ import threading from multiprocessing import Lock from numpy.random import choice 
-from attr import attrib, attrs, asdict from clearml import Task, Model from clearml.storage.util import hash_dict -from .preprocess_service import ModelEndpoint, ModelMonitoring, BasePreprocessRequest - - -@attrs -class CanaryEP(object): - endpoint = attrib(type=str) # load balancer endpoint - weights = attrib(type=list) # list of weights (order should be matching fixed_endpoints or prefix) - load_endpoints = attrib(type=list, default=[]) # list of endpoints to balance and route - load_endpoint_prefix = attrib( - type=str, default=None) # endpoint prefix to list - # (any endpoint starting with this prefix will be listed, sorted lexicographically, or broken into /) - - def as_dict(self, remove_null_entries=False): - if not remove_null_entries: - return asdict(self) - return {k: v for k, v in asdict(self).items() if v is not None} +from .preprocess_service import BasePreprocessRequest +from .endpoints import ModelEndpoint, ModelMonitoring, CanaryEP class FastWriteCounter(object): @@ -98,6 +83,7 @@ class ModelRequestProcessor(object): sleep(1) # retry to process return self.process_request(base_url=base_url, version=version, request_body=request_body) + try: # normalize url and version url = self._normalize_endpoint_url(base_url, version) @@ -120,9 +106,8 @@ class ModelRequestProcessor(object): self._engine_processor_lookup[url] = processor return_value = self._process_request(processor=processor, url=url, body=request_body) - except Exception: + finally: self._request_processing_state.dec() - raise return return_value @@ -194,7 +179,7 @@ class ModelRequestProcessor(object): if url in self._endpoints: print("Warning: Model endpoint \'{}\' overwritten".format(url)) - if not endpoint.model_id: + if not endpoint.model_id and any([model_project, model_name, model_tags]): model_query = dict( project_name=model_project, model_name=model_name, @@ -208,6 +193,8 @@ class ModelRequestProcessor(object): if len(models) > 1: print("Warning: Found multiple Models for \'{}\', 
selecting id={}".format(model_query, models[0].id)) endpoint.model_id = models[0].id + elif not endpoint.model_id: + print("Warning: No Model provided for \'{}\'".format(url)) # upload as new artifact if preprocess_code: @@ -237,6 +224,11 @@ class ModelRequestProcessor(object): if not isinstance(monitoring, ModelMonitoring): monitoring = ModelMonitoring(**monitoring) + # make sure we actually have something to monitor + if not any([monitoring.monitor_project, monitoring.monitor_name, monitoring.monitor_tags]): + raise ValueError("Model monitoring requires at least a " + "project / name / tag to monitor, none were provided.") + # make sure we have everything configured self._validate_model(monitoring) @@ -384,6 +376,10 @@ class ModelRequestProcessor(object): # release stall lock self._update_lock_flag = False + # update the state on the inference task + if Task.current_task() and Task.current_task().id != self._task.id: + self.serialize(task=Task.current_task()) + return True def serialize(self, task: Optional[Task] = None) -> None: @@ -878,7 +874,7 @@ class ModelRequestProcessor(object): return task @classmethod - def _normalize_endpoint_url(cls, endpoint: str, version : Optional[str] = None) -> str: + def _normalize_endpoint_url(cls, endpoint: str, version: Optional[str] = None) -> str: return "{}/{}".format(endpoint.rstrip("/"), version or "").rstrip("/") @classmethod diff --git a/clearml_serving/serving/preprocess_service.py b/clearml_serving/serving/preprocess_service.py index b59bc80..44e8428 100644 --- a/clearml_serving/serving/preprocess_service.py +++ b/clearml_serving/serving/preprocess_service.py @@ -1,73 +1,20 @@ -import numpy as np +import os from typing import Optional, Any, Callable, List -from attr import attrib, attrs, asdict - +import numpy as np from clearml import Task, Model from clearml.binding.artifacts import Artifacts from clearml.storage.util import sha256sum +from requests import post as request_post - -def _engine_validator(inst, attr, 
value): # noqa - if not BasePreprocessRequest.validate_engine_type(value): - raise TypeError("{} not supported engine type".format(value)) - - -def _matrix_type_validator(inst, attr, value): # noqa - if value and not np.dtype(value): - raise TypeError("{} not supported matrix type".format(value)) - - -@attrs -class ModelMonitoring(object): - base_serving_url = attrib(type=str) # serving point url prefix (example: "detect_cat") - monitor_project = attrib(type=str) # monitor model project (for model auto update) - monitor_name = attrib(type=str) # monitor model name (for model auto update, regexp selection) - monitor_tags = attrib(type=list) # monitor model tag (for model auto update) - engine_type = attrib(type=str, validator=_engine_validator) # engine type - only_published = attrib(type=bool, default=False) # only select published models - max_versions = attrib(type=int, default=None) # Maximum number of models to keep serving (latest X models) - input_size = attrib(type=list, default=None) # optional, model matrix size - input_type = attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type - input_name = attrib(type=str, default=None) # optional, layer name to push the input to - output_size = attrib(type=list, default=None) # optional, model matrix size - output_type = attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type - output_name = attrib(type=str, default=None) # optional, layer name to pull the results from - preprocess_artifact = attrib( - type=str, default=None) # optional artifact name storing the model preprocessing code - auxiliary_cfg = attrib(type=dict, default=None) # Auxiliary configuration (e.g. 
triton conf), Union[str, dict] - - def as_dict(self, remove_null_entries=False): - if not remove_null_entries: - return asdict(self) - return {k: v for k, v in asdict(self).items() if v is not None} - - -@attrs -class ModelEndpoint(object): - engine_type = attrib(type=str, validator=_engine_validator) # engine type - serving_url = attrib(type=str) # full serving point url (including version) example: "detect_cat/v1" - model_id = attrib(type=str) # list of model IDs to serve (order implies versions first is v1) - version = attrib(type=str, default="") # key (version string), default no version - preprocess_artifact = attrib( - type=str, default=None) # optional artifact name storing the model preprocessing code - input_size = attrib(type=list, default=None) # optional, model matrix size - input_type = attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type - input_name = attrib(type=str, default=None) # optional, layer name to push the input to - output_size = attrib(type=list, default=None) # optional, model matrix size - output_type = attrib(type=str, default=None, validator=_matrix_type_validator) # optional, model matrix type - output_name = attrib(type=str, default=None) # optional, layer name to pull the results from - auxiliary_cfg = attrib(type=dict, default=None) # Optional: Auxiliary configuration (e.g. 
triton conf), [str, dict] - - def as_dict(self, remove_null_entries=False): - if not remove_null_entries: - return asdict(self) - return {k: v for k, v in asdict(self).items() if v is not None} +from .endpoints import ModelEndpoint class BasePreprocessRequest(object): __preprocessing_lookup = {} __preprocessing_modules = set() + _default_serving_base_url = "http://127.0.0.1:8080/serve/" + _timeout = None # timeout in seconds for the entire request, set in __init__ def __init__( self, @@ -83,6 +30,8 @@ class BasePreprocessRequest(object): self._preprocess = None self._model = None self._server_config = server_config or {} + if self._timeout is None: + self._timeout = int(float(os.environ.get('GUNICORN_SERVING_TIMEOUT', 600)) * 0.8) # load preprocessing code here if self.model_endpoint.preprocess_artifact: if not task or self.model_endpoint.preprocess_artifact not in task.artifacts: @@ -111,7 +60,10 @@ class BasePreprocessRequest(object): spec = importlib.util.spec_from_file_location("Preprocess", path) _preprocess = importlib.util.module_from_spec(spec) spec.loader.exec_module(_preprocess) - self._preprocess = _preprocess.Preprocess() # noqa + Preprocess = _preprocess.Preprocess # noqa + # override `send_request` method + Preprocess.send_request = BasePreprocessRequest._preprocess_send_request + self._preprocess = Preprocess() self._preprocess.serving_config = server_config or {} if callable(getattr(self._preprocess, 'load', None)): self._model = self._preprocess.load(self._get_local_model_file()) @@ -125,7 +77,7 @@ class BasePreprocessRequest(object): Raise exception to report an error Return value will be passed to serving engine """ - if self._preprocess is not None: + if self._preprocess is not None and hasattr(self._preprocess, 'preprocess'): return self._preprocess.preprocess(request) return request @@ -135,7 +87,7 @@ class BasePreprocessRequest(object): Raise exception to report an error Return value will be passed to serving engine """ - if self._preprocess 
is not None: + if self._preprocess is not None and hasattr(self._preprocess, 'postprocess'): return self._preprocess.postprocess(data) return data @@ -162,6 +114,7 @@ class BasePreprocessRequest(object): """ A decorator to register an annotation type name for classes deriving from Annotation """ + def wrapper(cls): cls.__preprocessing_lookup[engine_name] = cls return cls @@ -181,6 +134,17 @@ class BasePreprocessRequest(object): except (ImportError, TypeError): pass + @staticmethod + def _preprocess_send_request(self, endpoint: str, version: str = None, data: dict = None) -> Optional[dict]: + endpoint = "{}/{}".format(endpoint.strip("/"), version.strip("/")) if version else endpoint.strip("/") + base_url = self.serving_config.get("base_serving_url") if self.serving_config else None + base_url = (base_url or BasePreprocessRequest._default_serving_base_url).strip("/") + url = "{}/{}".format(base_url, endpoint.strip("/")) + return_value = request_post(url, json=data, timeout=BasePreprocessRequest._timeout) + if not return_value.ok: + return None + return return_value.json() + @BasePreprocessRequest.register_engine("triton", modules=["grpc", "tritonclient"]) class TritonPreprocessRequest(BasePreprocessRequest): @@ -224,7 +188,7 @@ class TritonPreprocessRequest(BasePreprocessRequest): Detect gRPC server and send the request to it """ # allow to override bt preprocessing class - if self._preprocess is not None and getattr(self._preprocess, "process", None): + if self._preprocess is not None and hasattr(self._preprocess, "process"): return self._preprocess.process(data) # Create gRPC stub for communicating with the server @@ -268,7 +232,11 @@ class TritonPreprocessRequest(BasePreprocessRequest): output0.name = self.model_endpoint.output_name request.outputs.extend([output0]) - response = grpc_stub.ModelInfer(request, compression=self._ext_grpc.Compression.Gzip) + response = grpc_stub.ModelInfer( + request, + compression=self._ext_grpc.Compression.Gzip, + 
timeout=self._timeout + ) output_results = [] index = 0 @@ -351,6 +319,6 @@ class CustomPreprocessRequest(BasePreprocessRequest): The actual processing function. We run the process in this context """ - if self._preprocess is not None: + if self._preprocess is not None and hasattr(self._preprocess, 'process'): return self._preprocess.process(data) return None diff --git a/clearml_serving/serving/requirements.txt b/clearml_serving/serving/requirements.txt index 2bacc7a..8ab970d 100644 --- a/clearml_serving/serving/requirements.txt +++ b/clearml_serving/serving/requirements.txt @@ -14,3 +14,4 @@ grpcio Pillow xgboost lightgbm +requests diff --git a/examples/ensemble/preprocess.py b/examples/ensemble/preprocess.py new file mode 100644 index 0000000..079299a --- /dev/null +++ b/examples/ensemble/preprocess.py @@ -0,0 +1,19 @@ +from typing import Any + +import numpy as np + + +# Notice Preprocess class Must be named "Preprocess" +class Preprocess(object): + def __init__(self): + # set internal state, this will be called only once. (i.e. 
not per request)
+        pass
+
+    def preprocess(self, body: dict) -> Any:
+        # we expect to get two valid values in the dict: x0 and x1
+        return [[body.get("x0", None), body.get("x1", None)], ]
+
+    def postprocess(self, data: Any) -> dict:
+        # post process the data returned from the model inference engine
+        # data is the return value from model.predict we will put it inside a return value as Y
+        return dict(y=data.tolist() if isinstance(data, np.ndarray) else data)
diff --git a/examples/ensemble/readme.md b/examples/ensemble/readme.md
new file mode 100644
index 0000000..e3cac56
--- /dev/null
+++ b/examples/ensemble/readme.md
@@ -0,0 +1,31 @@
+# Train and Deploy Scikit-Learn model ensemble
+
+## training mock voting regression model
+
+Run the mock python training code
+```bash
+pip install -r examples/ensemble/requirements.txt
+python examples/ensemble/train_ensemble.py
+```
+
+The output will be a model created on the project "serving examples", by the name "train model ensemble"
+
+## setting up the serving service
+
+1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID)
+2. Create model endpoint:
+`clearml-serving --id <service_id> model add --engine sklearn --endpoint "test_model_ensemble" --preprocess "examples/ensemble/preprocess.py" --name "train model ensemble" --project "serving examples"`
+
+Or auto update
+
+`clearml-serving --id <service_id> model auto-update --engine sklearn --endpoint "test_model_ensemble_auto" --preprocess "examples/ensemble/preprocess.py" --name "train model ensemble" --project "serving examples" --max-versions 2`
+
+Or add Canary endpoint
+
+`clearml-serving --id <service_id> model canary --endpoint "test_model_ensemble_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_ensemble_auto`
+
+3. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID=<service_id> clearml-serving:latest`
+4. 
Test new endpoint: `curl -X POST "http://127.0.0.1:8080/serve/test_model_ensemble" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` + +> **_Notice:_** You can also change the serving service while it is already running! +This includes adding/removing endpoints, adding canary model routing etc. diff --git a/examples/ensemble/requirements.txt b/examples/ensemble/requirements.txt new file mode 100644 index 0000000..eb862f7 --- /dev/null +++ b/examples/ensemble/requirements.txt @@ -0,0 +1,2 @@ +clearml >= 1.1.6 +scikit-learn diff --git a/examples/ensemble/train_ensemble.py b/examples/ensemble/train_ensemble.py new file mode 100644 index 0000000..518673b --- /dev/null +++ b/examples/ensemble/train_ensemble.py @@ -0,0 +1,23 @@ +from sklearn.neighbors import KNeighborsRegressor +from sklearn.ensemble import RandomForestRegressor +from sklearn.ensemble import VotingRegressor +from sklearn.datasets import make_blobs +from joblib import dump +from clearml import Task + +task = Task.init(project_name="serving examples", task_name="train model ensemble", output_uri=True) + +# generate 2d classification dataset +X, y = make_blobs(n_samples=100, centers=2, n_features=2, random_state=1) + +knn = KNeighborsRegressor(n_neighbors=5) +knn.fit(X, y) + +rf = RandomForestRegressor(n_estimators=50) +rf.fit(X, y) + +estimators = [("knn", knn), ("rf", rf), ] +ensemble = VotingRegressor(estimators) +ensemble.fit(X, y) + +dump(ensemble, filename="ensemble-vr.pkl", compress=9) diff --git a/examples/pipeline/preprocess.py b/examples/pipeline/preprocess.py new file mode 100644 index 0000000..07598e5 --- /dev/null +++ b/examples/pipeline/preprocess.py @@ -0,0 +1,32 @@ +from typing import Any, List + + +# Notice Preprocess class Must be named "Preprocess" +class Preprocess(object): + def __init__(self): + # set internal state, this will be called only once. (i.e. 
not per request) + pass + + def postprocess(self, data: List[dict]) -> dict: + # we will here average the results and return the new value + # assume data is a list of dicts greater than 1 + + # average result + return dict(y=0.5 * data[0]['y'][0] + 0.5 * data[1]['y'][0]) + + def process(self, data: Any) -> Any: + """ + do something with the actual data, return any type of object. + The returned object will be passed as is to the postprocess function engine + """ + predict_a = self.send_request(endpoint="/test_model_sklearn_a/", version=None, data=data) + predict_b = self.send_request(endpoint="/test_model_sklearn_b/", version=None, data=data) + if not predict_b or not predict_a: + raise ValueError("Error requesting inference endpoint test_model_sklearn a/b") + + return [predict_a, predict_b] + + def send_request(self, endpoint, version, data) -> List[dict]: + # Mock Function! + # replaced by real send request function when constructed by the inference service + pass diff --git a/examples/pipeline/readme.md b/examples/pipeline/readme.md new file mode 100644 index 0000000..8b51268 --- /dev/null +++ b/examples/pipeline/readme.md @@ -0,0 +1,26 @@ +# Deploy a model inference pipeline + +## prerequisites + +Training a scikit-learn model (see example/sklearn) + +## setting up the serving service + +1. Create serving Service (if not already running): +`clearml-serving create --name "serving example"` (write down the service ID) + +2. Create model base two endpoints: +`clearml-serving --id model add --engine sklearn --endpoint "test_model_sklearn_a" --preprocess "examples/sklearn/preprocess.py" --name "train sklearn model" --project "serving examples"` + +`clearml-serving --id model add --engine sklearn --endpoint "test_model_sklearn_b" --preprocess "examples/sklearn/preprocess.py" --name "train sklearn model" --project "serving examples"` + +3. 
Create pipeline model endpoint: +`clearml-serving --id model add --engine custom --endpoint "test_model_pipeline" --preprocess "examples/pipeline/preprocess.py"` + +4. Run the clearml-serving container `docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= clearml-serving:latest` + +5. Test new endpoint: `curl -X POST "http://127.0.0.1:8080/serve/test_model_pipeline" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` + + +> **_Notice:_** You can also change the serving service while it is already running! +This includes adding/removing endpoints, adding canary model routing etc. From 4355c1b1f44edd07cd08309cb99f56eb0ce73293 Mon Sep 17 00:00:00 2001 From: allegroai Date: Mon, 21 Mar 2022 01:00:19 +0200 Subject: [PATCH 15/19] Add model metric logging --- .gitignore | 7 + README.md | 191 ++++++---- clearml_serving/__main__.py | 131 ++++++- .../engines/triton/requirements.txt | 6 +- .../engines/triton/triton_helper.py | 13 +- .../preprocess/preprocess_template.py | 67 +++- clearml_serving/serving/endpoints.py | 57 ++- clearml_serving/serving/entrypoint.sh | 47 ++- clearml_serving/serving/main.py | 4 + .../serving/model_request_processor.py | 308 +++++++++++++-- clearml_serving/serving/preprocess_service.py | 177 +++++---- clearml_serving/serving/requirements.txt | 30 +- clearml_serving/statistics/Dockerfile | 21 ++ clearml_serving/statistics/__init__.py | 0 clearml_serving/statistics/entrypoint.sh | 26 ++ clearml_serving/statistics/main.py | 41 ++ clearml_serving/statistics/metrics.py | 352 ++++++++++++++++++ clearml_serving/statistics/requirements.txt | 6 + docker/datasource.yml | 8 + docker/docker-compose-triton-gpu.yml | 151 ++++++++ docker/docker-compose-triton.yml | 146 ++++++++ docker/docker-compose.yml | 125 +++++++ docker/example.env | 6 + docker/prometheus.yml | 22 ++ examples/ensemble/preprocess.py | 4 +- examples/ensemble/readme.md | 1 + examples/keras/preprocess.py | 4 +- 
examples/keras/readme.md | 3 + examples/lightgbm/preprocess.py | 4 +- examples/pipeline/preprocess.py | 4 +- examples/pipeline/readme.md | 1 + examples/pytorch/preprocess.py | 4 +- examples/pytorch/readme.md | 5 +- examples/sklearn/preprocess.py | 4 +- examples/sklearn/readme.md | 1 + examples/xgboost/preprocess.py | 4 +- setup.py | 5 +- 37 files changed, 1733 insertions(+), 253 deletions(-) create mode 100644 clearml_serving/statistics/Dockerfile create mode 100644 clearml_serving/statistics/__init__.py create mode 100755 clearml_serving/statistics/entrypoint.sh create mode 100644 clearml_serving/statistics/main.py create mode 100644 clearml_serving/statistics/metrics.py create mode 100644 clearml_serving/statistics/requirements.txt create mode 100644 docker/datasource.yml create mode 100644 docker/docker-compose-triton-gpu.yml create mode 100644 docker/docker-compose-triton.yml create mode 100644 docker/docker-compose.yml create mode 100644 docker/example.env create mode 100644 docker/prometheus.yml diff --git a/.gitignore b/.gitignore index 1e12a66..f3038c1 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ dist/ build/ *.egg-info/ +.tmp/ # Compiled Python bytecode @@ -23,6 +24,12 @@ Thumbs.db *.app *.exe *.war +*.pkl +*.pt +*.pb +data/ +runs/ +variables/ # Large media files *.mp4 diff --git a/README.md b/README.md index e5a0fd8..eeeb896 100644 --- a/README.md +++ b/README.md @@ -61,6 +61,73 @@ Features: ## Installation +### prerequisites + +* ClearML-Server : Model repository, Service Health, Control plane +* Kubernetes / Single-instance Machine : Deploying containers +* CLI : Configuration & model deployment interface + +### :nail_care: Initial Setup + +1. Setup your [**ClearML Server**](https://github.com/allegroai/clearml-server) or use the [Free tier Hosting](https://app.clear.ml) +2. Setup local access (if you haven't already), see introductions [here](https://clear.ml/docs/latest/docs/getting_started/ds/ds_first_steps#install-clearml) +3. 
Install clearml-serving CLI: 
+```bash
+pip3 install clearml-serving
+```
+4. Create the Serving Service Controller
+  - `clearml-serving create --name "serving example"`
+  - The new serving service UID should be printed `"New Serving Service created: id=aa11bb22aa11bb22"`
+5. Write down the Serving Service UID
+6. Clone clearml-serving repository
+```bash
+git clone https://github.com/allegroai/clearml-serving.git
+```
+7. Edit the environment variables file (`docker/example.env`) with your clearml-server credentials and Serving Service UID. For example, you should have something like
+```bash
+cat docker/example.env
+```
+```bash
+  CLEARML_WEB_HOST="https://app.clear.ml"
+  CLEARML_API_HOST="https://api.clear.ml"
+  CLEARML_FILES_HOST="https://files.clear.ml"
+  CLEARML_API_ACCESS_KEY=""
+  CLEARML_API_SECRET_KEY=""
+  CLEARML_SERVING_TASK_ID=""
+```
+8. Spin the clearml-serving containers with docker-compose (or if running on Kubernetes use the helm chart)
+```bash
+cd docker && docker-compose --env-file example.env -f docker-compose.yml up
+```
+If you need Triton support (keras/pytorch/onnx etc.), use the triton docker-compose file
+```bash
+cd docker && docker-compose --env-file example.env -f docker-compose-triton.yml up
+```
+:muscle: If running on a GPU instance w/ Triton support (keras/pytorch/onnx etc.), use the triton gpu docker-compose file
+```bash
+cd docker && docker-compose --env-file example.env -f docker-compose-triton-gpu.yml up
+```
+
+> **Notice**: Any model that registers with "Triton" engine, will run the pre/post processing code on the Inference service container, and the model inference itself will be executed on the Triton Engine container. 
+ + +### :ocean: Optional: advanced setup - S3/GS/Azure access + +To add access credentials and allow the inference containers to download models from your S3/GS/Azure object-storage, +add the respected environment variables to your env files (`example.env`) +See further details on configuring the storage access [here](https://clear.ml/docs/latest/docs/integrations/storage#configuring-storage) + +```bash +AWS_ACCESS_KEY_ID +AWS_SECRET_ACCESS_KEY +AWS_DEFAULT_REGION + +GOOGLE_APPLICATION_CREDENTIALS + +AZURE_STORAGE_ACCOUNT +AZURE_STORAGE_KEY +``` + ### :information_desk_person: Concepts **CLI** - Secure configuration interface for on-line model upgrade/deployment on running Serving Services @@ -77,24 +144,6 @@ Features: **Dashboards** - Customizable dashboard-ing solution on top of the collected statistics, e.g. Grafana -### prerequisites - -* ClearML-Server : Model repository, Service Health, Control plane -* Kubernetes / Single-instance VM : Deploying containers -* CLI : Configuration & model deployment interface - - -### :nail_care: Initial Setup - -1. Setup your [**ClearML Server**](https://github.com/allegroai/clearml-server) or use the [Free tier Hosting](https://app.community.clear.ml) -2. Install the CLI on your laptop `clearml` and `clearml-serving` - - `pip3 install https://github.com/allegroai/clearml-serving.git@dev` - - Make sure to configure your machine to connect to your `clearml-server` see [clearml-init](https://clear.ml/docs/latest/docs/getting_started/ds/ds_first_steps#install-clearml) for details -3. Create the Serving Service Controller - - `clearml-serving create --name "serving example"` - - The new serving service UID should be printed `"New Serving Service created: id=aa11bb22aa11bb22` -4. Write down the Serving Service UID - ### :point_right: Toy model (scikit learn) deployment example 1. 
Train toy scikit-learn model @@ -123,61 +172,6 @@ Features: > To learn more on training models and the ClearML model repository, see the [ClearML documentation](https://clear.ml/docs) - -### :muscle: Nvidia Triton serving engine setup - -Nvidia Triton Serving Engine is used by clearml-serving to do the heavy lifting of deep-learning models on both GPU & CPU nodes. -Inside the Triton container a clearml controller is spinning and monitoring the Triton server. -All the triton models are automatically downloaded into the triton container in real-time, configured, and served. -A single Triton serving container is serving multiple models, based on the registered models on the Serving Service -Communication from the Inference container to the Triton container is done transparently over compressed gRPC channel. - -#### setup - -Optional: build the Triton container - - Customize container [Dockerfile](clearml_serving/engines/triton/Dockerfile) - - Build container `docker build --tag clearml-serving-triton:latest -f clearml_serving/engines/triton/Dockerfile .` - -Spin the triton engine container: `docker run -v ~/clearml.conf:/root/clearml.conf -p 8001:8001 -e CLEARML_SERVING_TASK_ID= -e CLEARML_TRITON_POLL_FREQ=5 -e CLEARML_TRITON_METRIC_FREQ=1 clearml-serving-triton:latest` - -Configure the "Serving Service" with the new Triton Engine gRPC IP:Port. 
Notice that when deploying on a Kubernetes cluster this should be a TCP ingest endpoint, to allow for transparent auto-scaling of the Triton Engine Containers - -`clearml-serving --id config --triton-grpc-server :8001` - -Spin the inference service (this is the external RestAPI interface) -`docker run -v ~/clearml.conf:/root/clearml.conf -p 8080:8080 -e CLEARML_SERVING_TASK_ID= -e CLEARML_SERVING_POLL_FREQ=5 clearml-serving-inference:latest` - -Now eny model that will register with "Triton" engine, will run the pre/post processing code on the Inference service container, and the model inference itself will be executed on the Triton Engine container. -See Tensorflow [example](examples/keras/readme.md) and Pytorch [example](examples/pytorch/readme.md) for further details. - - -### :ocean: Container Configuration Variables - -When spinning the Inference container or the Triton Engine container, -we need to specify the `clearml-server` address and access credentials -One way of achieving that is by mounting the `clearml.conf` file into the container's HOME folder (i.e. `-v ~/clearml.conf:/root/clearml.conf`) -We can also pass environment variables instead (see [details](https://clear.ml/docs/latest/docs/configs/env_vars#server-connection): -```bash -CLEARML_API_HOST="https://api.clear.ml" -CLEARML_WEB_HOST="https://app.clear.ml" -CLEARML_FILES_HOST="https://files.clear.ml" -CLEARML_API_ACCESS_KEY="access_key_here" -CLEARML_API_SECRET_KEY="secret_key_here" -``` - -To access models stored on an S3 buckets, Google Storage or Azure blob storage (notice that with GS you also need to make sure the access json is available inside the containers). 
See further details on configuring the storage access [here](https://clear.ml/docs/latest/docs/integrations/storage#configuring-storage) - -```bash -AWS_ACCESS_KEY_ID -AWS_SECRET_ACCESS_KEY -AWS_DEFAULT_REGION - -GOOGLE_APPLICATION_CREDENTIALS - -AZURE_STORAGE_ACCOUNT -AZURE_STORAGE_KEY -``` - ### :turtle: Registering & Deploying new models manually Uploading an existing model file into the model repository can be done via the `clearml` RestAPI, the python interface, or with the `clearml-serving` CLI @@ -200,7 +194,7 @@ Uploading an existing model file into the model repository can be done via the ` The clearml Serving Service support automatic model deployment and upgrades, directly connected with the model repository and API. When the model auto-deploy is configured, a new model versions will be automatically deployed when you "publish" or "tag" a new model in the `clearml` model repository. This automation interface allows for simpler CI/CD model deployment process, as a single API automatically deploy (or remove) a model from the Serving Service. -#### automatic model deployment example +#### Automatic model deployment example 1. Configure the model auto-update on the Serving Service - `clearml-serving --id model auto-update --engine sklearn --endpoint "test_model_sklearn_auto" --preprocess "preprocess.py" --name "train sklearn model" --project "serving examples" --max-versions 2` @@ -248,6 +242,42 @@ Example: - `curl -X POST "http://127.0.0.1:8080/serve/test_model" -H "accept: application/json" -H "Content-Type: application/json" -d '{"x0": 1, "x1": 2}'` +### Model monitoring and performance metrics + +ClearML serving instances send serving statistics (count/latency) automatically to Prometheus and Grafana can be used +to visualize and create live dashboards. + +The default docker-compose installation is preconfigured with Prometheus and Grafana, do notice that by default data/ate of both containers is *not* persistent. 
To add persistence we do recommend adding a volume mount. + +You can also add many custom metrics on the input/predictions of your models. +Once a model endpoint is registered, adding custom metric can be done using the CLI. +For example, assume we have our mock scikit-learn model deployed on endpoint `test_model_sklearn`, +we can log the requests inputs and outputs (see examples/sklearn/preprocess.py example): +```bash +clearml-serving --id metrics add --endpoint test_model_sklearn --variable-scalar +x0=0,0.1,0.5,1,10 x1=0,0.1,0.5,1,10 y=0,0.1,0.5,0.75,1 +``` + +This will create a distribution histogram (buckets specified via a list of less-equal values after `=` sign), +that we will be able to visualize on Grafana. +Notice we can also log time-series values with `--variable-value x2` or discrete results (e.g. classifications strings) with `--variable-enum animal=cat,dog,sheep`. +Additional custom variables can be in the preprocess and postprocess with a call to `collect_custom_statistics_fn({'new_var': 1.337})` see clearml_serving/preprocess/preprocess_template.py + +With the new metrics logged we can create a visualization dashboard over the latency of the calls, and the output distribution. + +Grafana model performance example: + +- browse to http://localhost:3000 +- login with: admin/admin +- create a new dashboard +- select Prometheus as data source +- Add a query: `100 * delta(test_model_sklearn:_latency_bucket[1m]) / delta(test_model_sklearn:_latency_sum[1m])` +- Change type to heatmap, and select on the right hand-side under "Data Format" select "Time series buckets" +- You now have the latency distribution, over time. 
+- Repeat the same process for x0, the query would be `100 * delta(test_model_sklearn:x0_bucket[1m]) / delta(test_model_sklearn:x0_sum[1m])` + +> **Notice**: If not specified all serving requests will be logged, to change the default configure "CLEARML_DEFAULT_METRIC_LOG_FREQ", for example CLEARML_DEFAULT_METRIC_LOG_FREQ=0.2 means only 20% of all requests will be logged. You can also specify per endpoint log frequency with the `clearml-serving` CLI. Check the CLI documentation with `cleamrl-serving metrics --help` + ### :fire: Model Serving Examples - Scikit-Learn [example](examples/sklearn/readme.md) - random data @@ -274,8 +304,9 @@ Example: - [x] CLI configuration tool - [x] Nvidia Triton integration - [x] GZip request compression - - [ ] TorchServe engine integration - - [ ] Prebuilt Docker containers (dockerhub) + - [x] TorchServe engine integration + - [x] Prebuilt Docker containers (dockerhub) + - [x] Docker-compose deployment (CPU/GPU) - [x] Scikit-Learn example - [x] XGBoost example - [x] LightGBM example @@ -283,10 +314,10 @@ Example: - [x] TensorFlow/Keras example - [x] Model ensemble example - [x] Model pipeline example - - [ ] Statistics Service - - [ ] Kafka install instructions - - [ ] Prometheus install instructions - - [ ] Grafana install instructions + - [x] Statistics Service + - [x] Kafka install instructions + - [x] Prometheus install instructions + - [x] Grafana install instructions - [ ] Kubernetes Helm Chart ## Contributing diff --git a/clearml_serving/__main__.py b/clearml_serving/__main__.py index 2a50347..b1d97ac 100644 --- a/clearml_serving/__main__.py +++ b/clearml_serving/__main__.py @@ -4,11 +4,78 @@ from argparse import ArgumentParser from pathlib import Path from clearml_serving.serving.model_request_processor import ModelRequestProcessor, CanaryEP -from clearml_serving.serving.endpoints import ModelMonitoring, ModelEndpoint +from clearml_serving.serving.endpoints import ModelMonitoring, ModelEndpoint, EndpointMetricLogging 
verbosity = False
+
+
+def func_metric_ls(args):
+    request_processor = ModelRequestProcessor(task_id=args.id)
+    print("List endpoint metrics, control task id={}".format(request_processor.get_id()))
+    request_processor.deserialize(skip_sync=True)
+    print("Logged Metrics:\n{}".format(
+        json.dumps({k: v.as_dict() for k, v in request_processor.list_metric_logging().items()}, indent=2)))
+
+
+def func_metric_rm(args):
+    request_processor = ModelRequestProcessor(task_id=args.id)
+    print("Serving service Task {}, Removing metrics from endpoint={}".format(
+        request_processor.get_id(), args.endpoint))
+    request_processor.deserialize(skip_sync=True)
+    for v in (args.variable or []):
+        if request_processor.remove_metric_logging(endpoint=args.endpoint, variable_name=v):
+            print("Removing metric {} from endpoint: {}".format(v, args.endpoint))
+        else:
+            raise ValueError("Could not remove {} from endpoint {}".format(v, args.endpoint))
+    print("Updating serving service")
+    request_processor.serialize()
+
+
+def func_metric_add(args):
+    request_processor = ModelRequestProcessor(task_id=args.id)
+    print("Serving service Task {}, Adding metric logging endpoint \'/{}/\'".format(
+        request_processor.get_id(), args.endpoint))
+    request_processor.deserialize(skip_sync=True)
+    metric = EndpointMetricLogging(endpoint=args.endpoint)
+    if args.log_freq is not None:
+        metric.log_frequency = float(args.log_freq)
+    for v in (args.variable_scalar or []):
+        if '=' not in v:
+            raise ValueError("Variable '{}' should be in the form of <name>=<buckets> "
+                             "example: x1=0,1,2,3,4,5".format(v))
+        name, buckets = v.split('=', 1)
+        if name in metric.metrics:
+            print("Warning: {} defined twice".format(name))
+        if '/' in buckets:
+            b_min, b_max, b_step = [float(b.strip()) for b in buckets.split('/', 2)]
+            buckets = list(range(b_min, b_max, b_step))  # FIXME(review): range() rejects float arguments - use a float-step loop or np.arange
+        else:
+            buckets = [float(b.strip()) for b in buckets.split(',')]
+        metric.metrics[name] = dict(type="scalar", buckets=buckets)
+
+    for v in (args.variable_enum or []):
+        if '=' not in v: 
+ raise ValueError("Variable '{}' should be in the form of = " + "example: x1=cat,dog,sheep".format(v)) + name, buckets = v.split('=', 1) + if name in metric.metrics: + print("Warning: {} defined twice".format(name)) + buckets = [str(b.strip()) for b in buckets.split(',')] + metric.metrics[name] = dict(type="enum", buckets=buckets) + + for v in (args.variable_value or []): + name = v.strip() + if name in metric.metrics: + print("Warning: {} defined twice".format(name)) + metric.metrics[name] = dict(type="variable", buckets=None) + + if not request_processor.add_metric_logging(metric=metric): + raise ValueError("Could not add metric logging endpoint {}".format(args.endpoint)) + + print("Updating serving service") + request_processor.serialize() + + def func_model_upload(args): if not args.path and not args.url: raise ValueError("Either --path or --url must be specified") @@ -46,9 +113,12 @@ def func_model_ls(args): request_processor = ModelRequestProcessor(task_id=args.id) print("List model serving and endpoints, control task id={}".format(request_processor.get_id())) request_processor.deserialize(skip_sync=True) - print("Endpoints:\n{}".format(json.dumps(request_processor.get_endpoints(), indent=2))) - print("Model Monitoring:\n{}".format(json.dumps(request_processor.get_model_monitoring(), indent=2))) - print("Canary:\n{}".format(json.dumps(request_processor.get_canary_endpoints(), indent=2))) + print("Endpoints:\n{}".format( + json.dumps({k: v.as_dict() for k, v in request_processor.get_endpoints().items()}, indent=2))) + print("Model Monitoring:\n{}".format( + json.dumps({k: v.as_dict() for k, v in request_processor.get_model_monitoring().items()}, indent=2))) + print("Canary:\n{}".format( + json.dumps({k: v.as_dict() for k, v in request_processor.get_canary_endpoints().items()}, indent=2))) def func_create_service(args): @@ -69,6 +139,10 @@ def func_config_service(args): print("Configuring serving service [id={}] triton_grpc_server={}".format( 
request_processor.get_id(), args.triton_grpc_server)) request_processor.configure(external_triton_grpc_server=args.triton_grpc_server) + if args.kafka_metric_server: + request_processor.configure(external_kafka_service_server=args.kafka_metric_server) + if args.metric_log_freq is not None: + pass def func_list_services(_): @@ -224,6 +298,47 @@ def cli(): help='[Optional] Specify project for the serving service. Default: DevOps') parser_create.set_defaults(func=func_create_service) + parser_metrics = subparsers.add_parser('metrics', help='Configure inference metrics Service') + parser_metrics.set_defaults(func=parser_metrics.print_help) + + metric_cmd = parser_metrics.add_subparsers(help='model metric command help') + + parser_metrics_add = metric_cmd.add_parser('add', help='Add/modify metric for a specific endpoint') + parser_metrics_add.add_argument( + '--endpoint', type=str, required=True, + help='metric endpoint name including version, e.g. "model/1" or a prefix "model/*" ' + 'Notice: it will override any previous endpoint logged metrics') + parser_metrics_add.add_argument( + '--log-freq', type=float, + help='Optional: logging request frequency, between 0.0 to 1.0 ' + 'example: 1.0 means all requests are logged, 0.5 means half of the requests are logged ' + 'if not specified, use global logging frequency, see `config --metric-log-freq`') + parser_metrics_add.add_argument( + '--variable-scalar', type=str, nargs='+', + help='Add float (scalar) argument to the metric logger, ' + '= example with specific buckets: "x1=0,0.2,0.4,0.6,0.8,1" or ' + 'with min/max/num_buckets "x1=0.0/1.0/5"') + parser_metrics_add.add_argument( + '--variable-enum', type=str, nargs='+', + help='Add enum (string) argument to the metric logger, ' + '= example: "detect=cat,dog,sheep"') + parser_metrics_add.add_argument( + '--variable-value', type=str, nargs='+', + help='Add non-samples scalar argument to the metric logger, ' + ' example: "latency"') + 
parser_metrics_add.set_defaults(func=func_metric_add) + + parser_metrics_rm = metric_cmd.add_parser('remove', help='Remove metric from a specific endpoint') + parser_metrics_rm.add_argument( + '--endpoint', type=str, help='metric endpoint name including version, e.g. "model/1" or a prefix "model/*"') + parser_metrics_rm.add_argument( + '--variable', type=str, nargs='+', + help='Remove (scalar/enum) argument from the metric logger, example: "x1"') + parser_metrics_rm.set_defaults(func=func_metric_rm) + + parser_metrics_ls = metric_cmd.add_parser('list', help='list metrics logged on all endpoints') + parser_metrics_ls.set_defaults(func=func_metric_ls) + parser_config = subparsers.add_parser('config', help='Configure a new Serving Service') parser_config.add_argument( '--base-serving-url', type=str, @@ -231,6 +346,12 @@ def cli(): parser_config.add_argument( '--triton-grpc-server', type=str, help='External ClearML-Triton serving container gRPC address. example: 127.0.0.1:9001') + parser_config.add_argument( + '--kafka-metric-server', type=str, + help='External Kafka service url. example: 127.0.0.1:9092') + parser_config.add_argument( + '--metric-log-freq', type=float, + help='Set default metric logging frequency. 1.0 is 100% of all requests are logged') parser_config.set_defaults(func=func_config_service) parser_model = subparsers.add_parser('model', help='Configure Model endpoints for an already running Service') @@ -273,7 +394,7 @@ def cli(): 'https://domain/model.bin)') parser_model_upload.add_argument( '--destination', type=str, - help='Optional, Specifying the target destination for the model to be uploaded' + help='Optional, Specifying the target destination for the model to be uploaded ' '(e.g. 
s3://bucket/folder/, gs://bucket/folder/, azure://bucket/folder/)') parser_model_upload.set_defaults(func=func_model_upload) diff --git a/clearml_serving/engines/triton/requirements.txt b/clearml_serving/engines/triton/requirements.txt index 4f45b00..1bc4db8 100644 --- a/clearml_serving/engines/triton/requirements.txt +++ b/clearml_serving/engines/triton/requirements.txt @@ -1,6 +1,6 @@ -clearml >= 1.1.6 +clearml >= 1.3.1 clearml-serving -tritonclient[grpc] +tritonclient[grpc]>=2.18.0,<2.19 grpcio -Pillow +Pillow>=9.0.1,<10 pathlib2 \ No newline at end of file diff --git a/clearml_serving/engines/triton/triton_helper.py b/clearml_serving/engines/triton/triton_helper.py index cf31957..fd6b760 100644 --- a/clearml_serving/engines/triton/triton_helper.py +++ b/clearml_serving/engines/triton/triton_helper.py @@ -10,7 +10,7 @@ import numpy as np from clearml import Task, Logger, InputModel from clearml.backend_api.utils import get_http_session_with_retry from clearml.utilities.pyhocon import ConfigFactory, ConfigTree, HOCONConverter -from pathlib2 import Path +from pathlib import Path from clearml_serving.serving.endpoints import ModelEndpoint from clearml_serving.serving.model_request_processor import ModelRequestProcessor @@ -413,10 +413,10 @@ def main(): '--serving-id', default=os.environ.get('CLEARML_SERVING_TASK_ID'), type=str, help='Specify main serving service Task ID') parser.add_argument( - '--project', default='serving', type=str, + '--project', default=None, type=str, help='Optional specify project for the serving engine Task') parser.add_argument( - '--name', default='nvidia-triton', type=str, + '--name', default='triton engine', type=str, help='Optional specify task name for the serving engine Task') parser.add_argument( '--update-frequency', default=os.environ.get('CLEARML_TRITON_POLL_FREQ') or 10., type=float, @@ -481,8 +481,13 @@ def main(): t = type(getattr(args, args_var, None)) setattr(args, args_var, type(t)(v) if t is not None else v) + # 
noinspection PyProtectedMember + serving_task = ModelRequestProcessor._get_control_plane_task(task_id=args.inference_task_id) + task = Task.init( - project_name=args.project, task_name=args.name, task_type=Task.TaskTypes.inference, + project_name=args.project or serving_task.get_project_name() or "serving", + task_name="{} - {}".format(serving_task.name, args.name), + task_type=Task.TaskTypes.inference, continue_last_task=args.inference_task_id or None ) print("configuration args: {}".format(args)) diff --git a/clearml_serving/preprocess/preprocess_template.py b/clearml_serving/preprocess/preprocess_template.py index 769ca5f..274a8c3 100644 --- a/clearml_serving/preprocess/preprocess_template.py +++ b/clearml_serving/preprocess/preprocess_template.py @@ -1,14 +1,14 @@ -from typing import Any, Optional +from typing import Any, Optional, List, Callable -# Notice Preprocess class Must be named "Preprocess" -# Otherwise there are No limitations, No need to inherit or to implement all methods +# Preprocess class Must be named "Preprocess" +# No need to inherit or to implement all methods class Preprocess(object): - serving_config = None - # example: { - # 'base_serving_url': 'http://127.0.0.1:8080/serve/', - # 'triton_grpc_server': '127.0.0.1:9001', - # }" + """ + Preprocess class Must be named "Preprocess" + Otherwise there are No limitations, No need to inherit or to implement all methods + Notice! This is not thread safe! the same instance may be accessed from multiple threads simultaneously + """ def __init__(self): # set internal state, this will be called only once. (i.e. 
not per request) @@ -16,31 +16,66 @@ class Preprocess(object): def load(self, local_file_name: str) -> Optional[Any]: # noqa """ - Optional, provide loading method for the model + Optional: provide loading method for the model useful if we need to load a model in a specific way for the prediction engine to work :param local_file_name: file name / path to read load the model from :return: Object that will be called with .predict() method for inference """ pass - def preprocess(self, body: dict) -> Any: # noqa + def preprocess(self, body: dict, collect_custom_statistics_fn: Optional[Callable[[dict], None]]) -> Any: # noqa """ - do something with the request data, return any type of object. + Optional: do something with the request data, return any type of object. The returned object will be passed as is to the inference engine + + :param body: dictionary as recieved from the RestAPI + :param collect_custom_statistics_fn: Optional, if provided allows to send a custom set of key/values + to the statictics collector servicd. + None is passed if statiscs collector is not configured, or if the current request should not be collected + + Usage example: + >>> print(body) + {"x0": 1, "x1": 2} + >>> if collect_custom_statistics_fn: + >>> collect_custom_statistics_fn({"x0": 1, "x1": 2}) + + :return: Object to be passed directly to the model inference """ return body - def postprocess(self, data: Any) -> dict: # noqa + def postprocess(self, data: Any, collect_custom_statistics_fn: Optional[Callable[[dict], None]]) -> dict: # noqa """ - post process the data returned from the model inference engine + Optional: post process the data returned from the model inference engine returned dict will be passed back as the request result as is. + + :param data: object as recieved from the inference model function + :param collect_custom_statistics_fn: Optional, if provided allows to send a custom set of key/values + to the statictics collector servicd. 
+ None is passed if statiscs collector is not configured, or if the current request should not be collected + + Usage example: + >>> if collect_custom_statistics_fn: + >>> collect_custom_statistics_fn({"y": 1}) + + :return: Dictionary passed directly as the returned result of the RestAPI """ return data - def process(self, data: Any) -> Any: # noqa + def process(self, data: Any, collect_custom_statistics_fn: Optional[Callable[[dict], None]]) -> Any: # noqa """ - do something with the actual data, return any type of object. + Optional: do something with the actual data, return any type of object. The returned object will be passed as is to the postprocess function engine + + :param data: object as recieved from the preprocessing function + :param collect_custom_statistics_fn: Optional, if provided allows to send a custom set of key/values + to the statictics collector servicd. + None is passed if statiscs collector is not configured, or if the current request should not be collected + + Usage example: + >>> if collect_custom_statistics_fn: + >>> collect_custom_statistics_fn({"type": "classification"}) + + :return: Object to be passed tp the post-processing function """ return data @@ -63,4 +98,4 @@ class Preprocess(object): >>> result = self.send_request(endpoint="test_model_sklearn", version="1", data={"x0": x0, "x1": x1}) >>> y = result["y"] """ - return None + pass diff --git a/clearml_serving/serving/endpoints.py b/clearml_serving/serving/endpoints.py index e1cd9d4..67eaa61 100644 --- a/clearml_serving/serving/endpoints.py +++ b/clearml_serving/serving/endpoints.py @@ -1,5 +1,5 @@ import numpy as np -from attr import attrib, attrs, asdict +from attr import attrib, attrs, asdict, validators def _engine_validator(inst, attr, value): # noqa @@ -14,7 +14,15 @@ def _matrix_type_validator(inst, attr, value): # noqa @attrs -class ModelMonitoring(object): +class BaseStruct(object): + def as_dict(self, remove_null_entries=False): + if not remove_null_entries: + return 
asdict(self) + return {k: v for k, v in asdict(self).items() if v is not None} + + +@attrs +class ModelMonitoring(BaseStruct): base_serving_url = attrib(type=str) # serving point url prefix (example: "detect_cat") engine_type = attrib(type=str, validator=_engine_validator) # engine type monitor_project = attrib(type=str, default=None) # monitor model project (for model auto update) @@ -32,14 +40,9 @@ class ModelMonitoring(object): type=str, default=None) # optional artifact name storing the model preprocessing code auxiliary_cfg = attrib(type=dict, default=None) # Auxiliary configuration (e.g. triton conf), Union[str, dict] - def as_dict(self, remove_null_entries=False): - if not remove_null_entries: - return asdict(self) - return {k: v for k, v in asdict(self).items() if v is not None} - @attrs -class ModelEndpoint(object): +class ModelEndpoint(BaseStruct): engine_type = attrib(type=str, validator=_engine_validator) # engine type serving_url = attrib(type=str) # full serving point url (including version) example: "detect_cat/v1" model_id = attrib(type=str, default=None) # model ID to serve (and download) @@ -54,14 +57,9 @@ class ModelEndpoint(object): output_name = attrib(type=str, default=None) # optional, layer name to pull the results from auxiliary_cfg = attrib(type=dict, default=None) # Optional: Auxiliary configuration (e.g. 
@attrs
class CanaryEP(BaseStruct):
    """Canary (load-balancing) endpoint definition."""
    endpoint = attrib(type=str)  # load balancer endpoint
    weights = attrib(type=list)  # list of weights (order should be matching fixed_endpoints or prefix)
    # factory=list avoids the shared mutable-default pitfall (a literal default
    # list would be shared between all CanaryEP instances)
    load_endpoints = attrib(type=list, factory=list)  # list of endpoints to balance and route
    load_endpoint_prefix = attrib(
        type=str, default=None)  # endpoint prefix to list
    # (any endpoint starting with this prefix will be listed, sorted lexicographically, or broken into /)


@attrs
class EndpointMetricLogging(BaseStruct):
    """Metric-logging configuration for a specific endpoint (or an endpoint prefix)."""

    @attrs
    class MetricType(BaseStruct):
        # metric kind; the meaning of `buckets` depends on the type
        type = attrib(type=str, validator=validators.in_(("scalar", "enum", "value", "counter")))
        buckets = attrib(type=list, default=None)

    endpoint = attrib(type=str)  # Specific endpoint to log metrics w/ version (example: "model/1")
    # If endpoint name ends with a "*" any endpoint with a matching prefix will be selected

    log_frequency = attrib(type=float, default=None)  # Specific endpoint log frequency
    # (0.0 to 1.0, where 1.0 is 100% of all requests are logged)

    # key=variable name, value=MetricType
    # NOTE: factory=dict instead of default={} -- a literal default dict is
    # evaluated once and shared between every instance (mutable-default bug)
    metrics = attrib(
        type=dict, factory=dict,
        converter=lambda x: {k: v if isinstance(v, EndpointMetricLogging.MetricType)
                             else EndpointMetricLogging.MetricType(**v) for k, v in x.items()})
    # example:
    # {"x1": dict(type="scalar", buckets=[0,1,2,3]),
    #  "y": dict(type="enum", buckets=["cat", "dog"]),
    #  "latency": dict(type="value", buckets=[]),
    # }

    def as_dict(self, remove_null_entries=False):
        # nested BaseStruct values are converted explicitly so MetricType
        # entries serialize through their own as_dict()
        if not remove_null_entries:
            return {k: v.as_dict(remove_null_entries) if isinstance(v, BaseStruct) else v
                    for k, v in asdict(self).items()}

        return {k: v.as_dict(remove_null_entries) if isinstance(v, BaseStruct) else v
                for k, v in asdict(self).items() if v is not None}
GUNICORN_EXTRA_ARGS="$GUNICORN_EXTRA_ARGS" +echo UVICORN_SERVE_LOOP="$UVICORN_SERVE_LOOP" +echo UVICORN_EXTRA_ARGS="$UVICORN_EXTRA_ARGS" +echo CLEARML_DEFAULT_BASE_SERVE_URL="$CLEARML_DEFAULT_BASE_SERVE_URL" +echo CLEARML_DEFAULT_TRITON_GRPC_ADDR="$CLEARML_DEFAULT_TRITON_GRPC_ADDR" +# runtime add extra python packages if [ ! -z "$EXTRA_PYTHON_PACKAGES" ] then python3 -m pip install $EXTRA_PYTHON_PACKAGES fi -# start service -PYTHONPATH=$(pwd) python3 -m gunicorn \ - --preload clearml_serving.serving.main:app \ - --workers $GUNICORN_NUM_PROCESS \ - --worker-class uvicorn.workers.UvicornWorker \ - --bind 0.0.0.0:$GUNICORN_SERVING_PORT +if [ -z "$CLEARML_USE_GUNICORN" ] +then + echo "Starting Uvicorn server" + PYTHONPATH=$(pwd) python3 -m uvicorn \ + clearml_serving.serving.main:app --host 0.0.0.0 --port $SERVING_PORT --loop $UVICORN_SERVE_LOOP \ + $UVICORN_EXTRA_ARGS +else + echo "Starting Gunicorn server" + # start service + PYTHONPATH=$(pwd) python3 -m gunicorn \ + --preload clearml_serving.serving.main:app \ + --workers $GUNICORN_NUM_PROCESS \ + --worker-class uvicorn.workers.UvicornWorker \ + --timeout $GUNICORN_SERVING_TIMEOUT \ + --bind 0.0.0.0:$SERVING_PORT \ + $GUNICORN_EXTRA_ARGS +fi diff --git a/clearml_serving/serving/main.py b/clearml_serving/serving/main.py index 2083ebd..f3473e7 100644 --- a/clearml_serving/serving/main.py +++ b/clearml_serving/serving/main.py @@ -47,12 +47,16 @@ except (ValueError, TypeError): # get the serving controller task # noinspection PyProtectedMember serving_task = ModelRequestProcessor._get_control_plane_task(task_id=serving_service_task_id) +# set to running (because we are here) +if serving_task.status != "in_progress": + serving_task.started(force=True) # create a new serving instance (for visibility and monitoring) instance_task = Task.init( project_name=serving_task.get_project_name(), task_name="{} - serve instance".format(serving_task.name), task_type="inference", ) +instance_task.set_system_tags(["service"]) processor 
= None # type: Optional[ModelRequestProcessor] # preload modules into memory before forking BasePreprocessRequest.load_modules() diff --git a/clearml_serving/serving/model_request_processor.py b/clearml_serving/serving/model_request_processor.py index 0186ebd..5d7d271 100644 --- a/clearml_serving/serving/model_request_processor.py +++ b/clearml_serving/serving/model_request_processor.py @@ -1,7 +1,9 @@ import json import os from pathlib import Path -from time import sleep +from queue import Queue +from random import random +from time import sleep, time from typing import Optional, Union, Dict, List import itertools import threading @@ -11,7 +13,7 @@ from numpy.random import choice from clearml import Task, Model from clearml.storage.util import hash_dict from .preprocess_service import BasePreprocessRequest -from .endpoints import ModelEndpoint, ModelMonitoring, CanaryEP +from .endpoints import ModelEndpoint, ModelMonitoring, CanaryEP, EndpointMetricLogging class FastWriteCounter(object): @@ -30,7 +32,12 @@ class FastWriteCounter(object): class ModelRequestProcessor(object): - _system_tag = 'serving-control-plane' + _system_tag = "serving-control-plane" + _kafka_topic = "clearml_inference_stats" + _config_key_serving_base_url = "serving_base_url" + _config_key_triton_grpc = "triton_grpc_server" + _config_key_kafka_stats = "kafka_service_server" + _config_key_def_metric_freq = "metric_logging_freq" def __init__( self, @@ -60,15 +67,24 @@ class ModelRequestProcessor(object): self._canary_endpoints = dict() # type: Dict[str, CanaryEP] self._canary_route = dict() # type: Dict[str, dict] self._engine_processor_lookup = dict() # type: Dict[str, BasePreprocessRequest] + self._metric_logging = dict() # type: Dict[str, EndpointMetricLogging] + self._endpoint_metric_logging = dict() # type: Dict[str, EndpointMetricLogging] self._last_update_hash = None self._sync_daemon_thread = None + self._stats_sending_thread = None + self._stats_queue = Queue() # this is used for Fast 
locking mechanisms (so we do not actually need to use Locks) self._update_lock_flag = False self._request_processing_state = FastWriteCounter() self._update_lock_guard = update_lock_guard or threading.Lock() + self._instance_task = None # serving server config self._configuration = {} - self._instance_task = None + # deserialized values go here + self._kafka_stats_url = None + self._triton_grpc = None + self._serving_base_url = None + self._metric_log_freq = None def process_request(self, base_url: str, version: str, request_body: dict) -> dict: """ @@ -100,9 +116,7 @@ class ModelRequestProcessor(object): processor = self._engine_processor_lookup.get(url) if not processor: processor_cls = BasePreprocessRequest.get_engine_cls(ep.engine_type) - processor = processor_cls( - model_endpoint=ep, task=self._task, server_config=dict(**self._configuration) - ) + processor = processor_cls(model_endpoint=ep, task=self._task) self._engine_processor_lookup[url] = processor return_value = self._process_request(processor=processor, url=url, body=request_body) @@ -124,6 +138,8 @@ class ModelRequestProcessor(object): self, external_serving_base_url: Optional[str] = None, external_triton_grpc_server: Optional[str] = None, + external_kafka_service_server: Optional[str] = None, + default_metric_log_freq: Optional[float] = None, ): """ Set ModelRequestProcessor configuration arguments. @@ -133,21 +149,40 @@ class ModelRequestProcessor(object): allowing it to concatenate and combine multiple model requests into one :param external_triton_grpc_server: set the external grpc tcp port of the Nvidia Triton clearml container. Used by the clearml triton engine class to send inference requests + :param external_kafka_service_server: Optional, Kafka endpoint for the statistics controller collection. + :param default_metric_log_freq: Default request metric logging (0 to 1.0, 1. 
means 100% of requests are logged) """ if external_serving_base_url is not None: self._task.set_parameter( - name="General/serving_base_url", + name="General/{}".format(self._config_key_serving_base_url), value=str(external_serving_base_url), value_type="str", description="external base http endpoint for the serving service" ) if external_triton_grpc_server is not None: self._task.set_parameter( - name="General/triton_grpc_server", + name="General/{}".format(self._config_key_triton_grpc), value=str(external_triton_grpc_server), value_type="str", description="external grpc tcp port of the Nvidia Triton ClearML container running" ) + if external_kafka_service_server is not None: + self._task.set_parameter( + name="General/{}".format(self._config_key_kafka_stats), + value=str(external_kafka_service_server), + value_type="str", + description="external Kafka service url for the statistics controller server" + ) + if default_metric_log_freq is not None: + self._task.set_parameter( + name="General/{}".format(self._config_key_def_metric_freq), + value=str(default_metric_log_freq), + value_type="float", + description="Request metric logging frequency" + ) + + def get_configuration(self) -> dict: + return dict(**self._configuration) def add_endpoint( self, @@ -301,13 +336,92 @@ class ModelRequestProcessor(object): self._canary_endpoints.pop(endpoint_url, None) return True - def deserialize(self, task: Task = None, prefetch_artifacts=False, skip_sync=False) -> bool: + def add_metric_logging(self, metric: Union[EndpointMetricLogging, dict]) -> bool: + """ + Add metric logging to a specific endpoint + Valid metric variable are any variables on the request or response dictionary, + or a custom preprocess reported variable + + When overwriting and existing monitored variable, output a warning. 
+ + :param metric: Metric variable to monitor + :return: True if successful + """ + if not isinstance(metric, EndpointMetricLogging): + metric = EndpointMetricLogging(**metric) + + name = str(metric.endpoint).strip("/") + metric.endpoint = name + + if name not in self._endpoints and not name.endswith('*'): + raise ValueError("Metric logging \'{}\' references a nonexistent endpoint".format(name)) + + if name in self._metric_logging: + print("Warning: Metric logging \'{}\' overwritten".format(name)) + + self._metric_logging[name] = metric + return True + + def remove_metric_logging( + self, + endpoint: str, + variable_name: str = None, + ) -> bool: + """ + Remove existing logged metric variable. Use variable name and endpoint as unique identifier + + :param endpoint: Endpoint name (including version, e.g. "model/1" or "model/*") + :param variable_name: Variable name (str), pass None to remove the entire endpoint logging + + :return: True if successful + """ + + name = str(endpoint).strip("/") + + if name not in self._metric_logging or \ + (variable_name and variable_name not in self._metric_logging[name].metrics): + return False + + if not variable_name: + self._metric_logging.pop(name, None) + else: + self._metric_logging[name].metrics.pop(variable_name, None) + + return True + + def list_metric_logging(self) -> Dict[str, EndpointMetricLogging]: + """ + List existing logged metric variables. + + :return: Dictionary, key='endpoint/version' value=EndpointMetricLogging + """ + + return dict(**self._metric_logging) + + def list_endpoint_logging(self) -> Dict[str, EndpointMetricLogging]: + """ + List endpoints (fully synced) current metric logging state. 
+ + :return: Dictionary, key='endpoint/version' value=EndpointMetricLogging + """ + + return dict(**self._endpoint_metric_logging) + + def deserialize( + self, + task: Task = None, + prefetch_artifacts: bool = False, + skip_sync: bool = False, + update_current_task: bool = True + ) -> bool: """ Restore ModelRequestProcessor state from Task return True if actually needed serialization, False nothing changed :param task: Load data from Task :param prefetch_artifacts: If True prefetch artifacts requested by the endpoints :param skip_sync: If True do not update the canary/monitoring state + :param update_current_task: is not skip_sync, and is True, + update the current Task with the configuration synced from the serving service Task """ if not task: task = self._task @@ -315,7 +429,15 @@ class ModelRequestProcessor(object): endpoints = task.get_configuration_object_as_dict(name='endpoints') or {} canary_ep = task.get_configuration_object_as_dict(name='canary') or {} model_monitoring = task.get_configuration_object_as_dict(name='model_monitoring') or {} - hashed_conf = hash_dict(dict(endpoints=endpoints, canary_ep=canary_ep, model_monitoring=model_monitoring)) + metric_logging = task.get_configuration_object_as_dict(name='metric_logging') or {} + + hashed_conf = hash_dict( + dict(endpoints=endpoints, + canary_ep=canary_ep, + model_monitoring=model_monitoring, + metric_logging=metric_logging, + configuration=configuration) + ) if self._last_update_hash == hashed_conf and not self._model_monitoring_update_request: return False print("Info: syncing model endpoint configuration, state hash={}".format(hashed_conf)) @@ -333,13 +455,18 @@ class ModelRequestProcessor(object): k: CanaryEP(**{i: j for i, j in v.items() if hasattr(CanaryEP.__attrs_attrs__, i)}) for k, v in canary_ep.items() } + metric_logging = { + k: EndpointMetricLogging(**{i: j for i, j in v.items() if hasattr(EndpointMetricLogging.__attrs_attrs__, i)}) + for k, v in metric_logging.items() + } # if there is no 
need to sync Canary and Models we can just leave if skip_sync: self._endpoints = endpoints self._model_monitoring = model_monitoring self._canary_endpoints = canary_endpoints - self._configuration = configuration + self._metric_logging = metric_logging + self._deserialize_conf_dict(configuration) return True # make sure we only have one stall request at any given moment @@ -366,18 +493,21 @@ class ModelRequestProcessor(object): self._endpoints = endpoints self._model_monitoring = model_monitoring self._canary_endpoints = canary_endpoints - self._configuration = configuration + self._metric_logging = metric_logging + self._deserialize_conf_dict(configuration) # if we have models we need to sync, now is the time self._sync_monitored_models() self._update_canary_lookup() + self._sync_metric_logging() + # release stall lock self._update_lock_flag = False # update the state on the inference task - if Task.current_task() and Task.current_task().id != self._task.id: + if update_current_task and Task.current_task() and Task.current_task().id != self._task.id: self.serialize(task=Task.current_task()) return True @@ -394,6 +524,8 @@ class ModelRequestProcessor(object): task.set_configuration_object(name='canary', config_dict=config_dict) config_dict = {k: v.as_dict(remove_null_entries=True) for k, v in self._model_monitoring.items()} task.set_configuration_object(name='model_monitoring', config_dict=config_dict) + config_dict = {k: v.as_dict(remove_null_entries=True) for k, v in self._metric_logging.items()} + task.set_configuration_object(name='metric_logging', config_dict=config_dict) def _update_canary_lookup(self): canary_route = {} @@ -547,6 +679,32 @@ class ModelRequestProcessor(object): ) return True + def _sync_metric_logging(self, force: bool = False) -> bool: + if not force and not self._metric_logging: + return False + + fixed_metric_endpoint = { + k: v for k, v in self._metric_logging.items() if "*/" not in k + } + prefix_metric_endpoint = {k.split("*/")[0]: v 
for k, v in self._metric_logging.items() if "*/" in k} + + endpoint_metric_logging = {} + for k, ep in list(self._endpoints.items()) + list(self._model_monitoring_endpoints.items()): + if k in fixed_metric_endpoint: + if k not in endpoint_metric_logging: + endpoint_metric_logging[k] = fixed_metric_endpoint[k] + + continue + for p, v in prefix_metric_endpoint.items(): + if k.startswith(p): + if k not in endpoint_metric_logging: + endpoint_metric_logging[k] = v + + break + + self._endpoint_metric_logging = endpoint_metric_logging + return True + def launch(self, poll_frequency_sec=300): """ Launch the background synchronization thread and monitoring thread @@ -572,10 +730,15 @@ class ModelRequestProcessor(object): return self._sync_daemon_thread = threading.Thread( target=self._sync_daemon, args=(poll_frequency_sec, ), daemon=True) + self._stats_sending_thread = threading.Thread( + target=self._stats_send_loop, daemon=True) + self._sync_daemon_thread.start() + self._stats_sending_thread.start() + # we return immediately - def _sync_daemon(self, poll_frequency_sec=300): + def _sync_daemon(self, poll_frequency_sec: float = 300) -> None: """ Background thread, syncing model changes into request service. 
""" @@ -617,6 +780,44 @@ class ModelRequestProcessor(object): except Exception as ex: print("Exception occurred in monitoring thread: {}".format(ex)) + def _stats_send_loop(self) -> None: + """ + Background thread for sending stats to Kafka service + """ + if not self._kafka_stats_url: + print("No Kafka Statistics service configured, shutting down statistics report") + return + + print("Starting Kafka Statistics reporting: {}".format(self._kafka_stats_url)) + + from kafka import KafkaProducer # noqa + + while True: + try: + producer = KafkaProducer( + bootstrap_servers=self._kafka_stats_url, # ['localhost:9092'], + value_serializer=lambda x: json.dumps(x).encode('utf-8'), + compression_type='lz4', # requires python lz4 package + ) + break + except Exception as ex: + print("Error: failed opening Kafka consumer [{}]: {}".format(self._kafka_stats_url, ex)) + print("Retrying in 30 seconds") + sleep(30) + + while True: + try: + stats_dict = self._stats_queue.get(block=True) + except Exception as ex: + print("Warning: Statistics thread exception: {}".format(ex)) + break + # send into kafka service + try: + producer.send(self._kafka_topic, value=stats_dict).get() + except Exception as ex: + print("Warning: Failed to send statistics packet to Kafka service: {}".format(ex)) + pass + def get_id(self) -> str: return self._task.id @@ -781,12 +982,76 @@ class ModelRequestProcessor(object): self._instance_task.get_logger().report_plotly( title='Serving Endpoints Layout', series='', iteration=0, figure=fig) - @staticmethod - def _process_request(processor: BasePreprocessRequest, url: str, body: dict) -> dict: - # todo: add some statistics - preprocessed = processor.preprocess(body) - processed = processor.process(data=preprocessed) - return processor.postprocess(data=processed) + def _deserialize_conf_dict(self, configuration: dict) -> None: + self._configuration = configuration + + # deserialized values go here + self._kafka_stats_url = \ + 
configuration.get(self._config_key_kafka_stats) or \ + os.environ.get("CLEARML_DEFAULT_KAFKA_SERVE_URL") + self._triton_grpc = \ + configuration.get(self._config_key_triton_grpc) or \ + os.environ.get("CLEARML_DEFAULT_TRITON_GRPC_ADDR") + self._serving_base_url = \ + configuration.get(self._config_key_serving_base_url) or \ + os.environ.get("CLEARML_DEFAULT_BASE_SERVE_URL") + self._metric_log_freq = \ + float(configuration.get(self._config_key_def_metric_freq, + os.environ.get("CLEARML_DEFAULT_METRIC_LOG_FREQ", 1.0))) + # update back configuration + self._configuration[self._config_key_kafka_stats] = self._kafka_stats_url + self._configuration[self._config_key_triton_grpc] = self._triton_grpc + self._configuration[self._config_key_serving_base_url] = self._serving_base_url + self._configuration[self._config_key_def_metric_freq] = self._metric_log_freq + # update preprocessing classes + BasePreprocessRequest.set_server_config(self._configuration) + + def _process_request(self, processor: BasePreprocessRequest, url: str, body: dict) -> dict: + # collect statistics for this request + stats = {} + stats_collect_fn = None + collect_stats = False + freq = 1 + # decide if we are collecting the stats + metric_endpoint = self._metric_logging.get(url) + if self._kafka_stats_url: + freq = metric_endpoint.log_frequency if metric_endpoint and metric_endpoint.log_frequency is not None \ + else self._metric_log_freq + + if freq and random() <= freq: + stats_collect_fn = stats.update + collect_stats = True + + tic = time() + preprocessed = processor.preprocess(body, stats_collect_fn) + processed = processor.process(preprocessed, stats_collect_fn) + return_value = processor.postprocess(processed, stats_collect_fn) + tic = time() - tic + if collect_stats: + # 10th of a millisecond should be enough + stats['_latency'] = round(tic, 4) + stats['_count'] = int(1.0/freq) + stats['_url'] = url + + # collect inputs + if metric_endpoint and body: + for k, v in body.items(): + if k in 
metric_endpoint.metrics: + stats[k] = v + # collect outputs + if metric_endpoint and return_value: + for k, v in return_value.items(): + if k in metric_endpoint.metrics: + stats[k] = v + + # send stats in background, push it into a thread queue + # noinspection PyBroadException + try: + self._stats_queue.put(stats, block=False) + except Exception: + pass + + return return_value @classmethod def list_control_plane_tasks( @@ -894,4 +1159,3 @@ class ModelRequestProcessor(object): if not endpoint.auxiliary_cfg and missing: raise ValueError("Triton engine requires input description - missing values in {}".format(missing)) return True - diff --git a/clearml_serving/serving/preprocess_service.py b/clearml_serving/serving/preprocess_service.py index 44e8428..0f1195c 100644 --- a/clearml_serving/serving/preprocess_service.py +++ b/clearml_serving/serving/preprocess_service.py @@ -14,13 +14,13 @@ class BasePreprocessRequest(object): __preprocessing_lookup = {} __preprocessing_modules = set() _default_serving_base_url = "http://127.0.0.1:8080/serve/" + _server_config = {} # externally configured by the serving inference service _timeout = None # timeout in seconds for the entire request, set in __init__ def __init__( self, model_endpoint: ModelEndpoint, task: Task = None, - server_config: dict = None, ): """ Notice this object is not be created per request, but once per Process @@ -29,71 +29,102 @@ class BasePreprocessRequest(object): self.model_endpoint = model_endpoint self._preprocess = None self._model = None - self._server_config = server_config or {} if self._timeout is None: self._timeout = int(float(os.environ.get('GUNICORN_SERVING_TIMEOUT', 600)) * 0.8) + # load preprocessing code here if self.model_endpoint.preprocess_artifact: if not task or self.model_endpoint.preprocess_artifact not in task.artifacts: - print("Warning: could not find preprocessing artifact \'{}\' on Task id={}".format( + raise ValueError("Error: could not find preprocessing artifact \'{}\' on 
Task id={}".format( self.model_endpoint.preprocess_artifact, task.id)) else: try: - path = task.artifacts[self.model_endpoint.preprocess_artifact].get_local_copy() - # check file content hash, should only happens once?! - # noinspection PyProtectedMember - file_hash, _ = sha256sum(path, block_size=Artifacts._hash_block_size) - if file_hash != task.artifacts[self.model_endpoint.preprocess_artifact].hash: - print("INFO: re-downloading artifact '{}' hash changed".format( - self.model_endpoint.preprocess_artifact)) - path = task.artifacts[self.model_endpoint.preprocess_artifact].get_local_copy( - extract_archive=True, - force_download=True, - ) - else: - # extract zip if we need to, otherwise it will be the same - path = task.artifacts[self.model_endpoint.preprocess_artifact].get_local_copy( - extract_archive=True, - ) - - import importlib.util - spec = importlib.util.spec_from_file_location("Preprocess", path) - _preprocess = importlib.util.module_from_spec(spec) - spec.loader.exec_module(_preprocess) - Preprocess = _preprocess.Preprocess # noqa - # override `send_request` method - Preprocess.send_request = BasePreprocessRequest._preprocess_send_request - self._preprocess = Preprocess() - self._preprocess.serving_config = server_config or {} - if callable(getattr(self._preprocess, 'load', None)): - self._model = self._preprocess.load(self._get_local_model_file()) + self._instantiate_custom_preprocess_cls(task) except Exception as ex: - print("Warning: Failed loading preprocess code for \'{}\': {}".format( + raise ValueError("Error: Failed loading preprocess code for \'{}\': {}".format( self.model_endpoint.preprocess_artifact, ex)) - def preprocess(self, request): - # type: (dict) -> Optional[Any] + def _instantiate_custom_preprocess_cls(self, task: Task) -> None: + path = task.artifacts[self.model_endpoint.preprocess_artifact].get_local_copy() + # check file content hash, should only happens once?! 
+ # noinspection PyProtectedMember + file_hash, _ = sha256sum(path, block_size=Artifacts._hash_block_size) + if file_hash != task.artifacts[self.model_endpoint.preprocess_artifact].hash: + print("INFO: re-downloading artifact '{}' hash changed".format( + self.model_endpoint.preprocess_artifact)) + path = task.artifacts[self.model_endpoint.preprocess_artifact].get_local_copy( + extract_archive=True, + force_download=True, + ) + else: + # extract zip if we need to, otherwise it will be the same + path = task.artifacts[self.model_endpoint.preprocess_artifact].get_local_copy( + extract_archive=True, + ) + + import importlib.util + spec = importlib.util.spec_from_file_location("Preprocess", path) + _preprocess = importlib.util.module_from_spec(spec) + spec.loader.exec_module(_preprocess) + Preprocess = _preprocess.Preprocess # noqa + # override `send_request` method + Preprocess.send_request = BasePreprocessRequest._preprocess_send_request + # create preprocess class + self._preprocess = Preprocess() + # custom model load callback function + if callable(getattr(self._preprocess, 'load', None)): + self._model = self._preprocess.load(self._get_local_model_file()) + + def preprocess(self, request: dict, collect_custom_statistics_fn: Callable[[dict], None] = None) -> Optional[Any]: """ Raise exception to report an error Return value will be passed to serving engine + + :param request: dictionary as recieved from the RestAPI + :param collect_custom_statistics_fn: Optional, allows to send a custom set of key/values + to the statictics collector servicd + + Usage example: + >>> print(request) + {"x0": 1, "x1": 2} + >>> collect_custom_statistics_fn({"x0": 1, "x1": 2}) + + :return: Object to be passed directly to the model inference """ if self._preprocess is not None and hasattr(self._preprocess, 'preprocess'): - return self._preprocess.preprocess(request) + return self._preprocess.preprocess(request, collect_custom_statistics_fn) return request - def postprocess(self, data): - 
# type: (Any) -> Optional[dict] + def postprocess(self, data: Any, collect_custom_statistics_fn: Callable[[dict], None] = None) -> Optional[dict]: """ Raise exception to report an error Return value will be passed to serving engine + + :param data: object as recieved from the inference model function + :param collect_custom_statistics_fn: Optional, allows to send a custom set of key/values + to the statictics collector servicd + + Usage example: + >>> collect_custom_statistics_fn({"y": 1}) + + :return: Dictionary passed directly as the returned result of the RestAPI """ if self._preprocess is not None and hasattr(self._preprocess, 'postprocess'): - return self._preprocess.postprocess(data) + return self._preprocess.postprocess(data, collect_custom_statistics_fn) return data - def process(self, data: Any) -> Any: + def process(self, data: Any, collect_custom_statistics_fn: Callable[[dict], None] = None) -> Any: """ The actual processing function. Can be send to external service + + :param data: object as recieved from the preprocessing function + :param collect_custom_statistics_fn: Optional, allows to send a custom set of key/values + to the statictics collector servicd + + Usage example: + >>> collect_custom_statistics_fn({"type": "classification"}) + + :return: Object to be passed tp the post-processing function """ pass @@ -101,6 +132,14 @@ class BasePreprocessRequest(object): model_repo_object = Model(model_id=self.model_endpoint.model_id) return model_repo_object.get_local_copy() + @classmethod + def set_server_config(cls, server_config: dict) -> None: + cls._server_config = server_config + + @classmethod + def get_server_config(cls) -> dict: + return cls._server_config + @classmethod def validate_engine_type(cls, engine: str) -> bool: return engine in cls.__preprocessing_lookup @@ -137,7 +176,7 @@ class BasePreprocessRequest(object): @staticmethod def _preprocess_send_request(self, endpoint: str, version: str = None, data: dict = None) -> Optional[dict]: 
endpoint = "{}/{}".format(endpoint.strip("/"), version.strip("/")) if version else endpoint.strip("/") - base_url = self.serving_config.get("base_serving_url") if self.serving_config else None + base_url = BasePreprocessRequest.get_server_config().get("base_serving_url") base_url = (base_url or BasePreprocessRequest._default_serving_base_url).strip("/") url = "{}/{}".format(base_url, endpoint.strip("/")) return_value = request_post(url, json=data, timeout=BasePreprocessRequest._timeout) @@ -159,40 +198,50 @@ class TritonPreprocessRequest(BasePreprocessRequest): np.float32: 'fp32_contents', np.float64: 'fp64_contents', } + _default_grpc_address = "127.0.0.1:8001" _ext_grpc = None _ext_np_to_triton_dtype = None _ext_service_pb2 = None _ext_service_pb2_grpc = None - def __init__(self, model_endpoint: ModelEndpoint, task: Task = None, server_config: dict = None): + def __init__(self, model_endpoint: ModelEndpoint, task: Task = None): super(TritonPreprocessRequest, self).__init__( - model_endpoint=model_endpoint, task=task, server_config=server_config) + model_endpoint=model_endpoint, task=task) # load Triton Module if self._ext_grpc is None: - import grpc + import grpc # noqa self._ext_grpc = grpc if self._ext_np_to_triton_dtype is None: - from tritonclient.utils import np_to_triton_dtype + from tritonclient.utils import np_to_triton_dtype # noqa self._ext_np_to_triton_dtype = np_to_triton_dtype if self._ext_service_pb2 is None: - from tritonclient.grpc import service_pb2, service_pb2_grpc + from tritonclient.grpc import service_pb2, service_pb2_grpc # noqa self._ext_service_pb2 = service_pb2 self._ext_service_pb2_grpc = service_pb2_grpc - def process(self, data: Any) -> Any: + def process(self, data: Any, collect_custom_statistics_fn: Callable[[dict], None] = None) -> Any: """ The actual processing function. 
Detect gRPC server and send the request to it + + :param data: object as recieved from the preprocessing function + :param collect_custom_statistics_fn: Optional, allows to send a custom set of key/values + to the statictics collector servicd + + Usage example: + >>> collect_custom_statistics_fn({"type": "classification"}) + + :return: Object to be passed tp the post-processing function """ # allow to override bt preprocessing class if self._preprocess is not None and hasattr(self._preprocess, "process"): - return self._preprocess.process(data) + return self._preprocess.process(data, collect_custom_statistics_fn) # Create gRPC stub for communicating with the server - triton_server_address = self._server_config.get("triton_grpc_server") + triton_server_address = self._server_config.get("triton_grpc_server") or self._default_grpc_address if not triton_server_address: raise ValueError("External Triton gRPC server is not configured!") try: @@ -255,15 +304,15 @@ class TritonPreprocessRequest(BasePreprocessRequest): @BasePreprocessRequest.register_engine("sklearn", modules=["joblib", "sklearn"]) class SKLearnPreprocessRequest(BasePreprocessRequest): - def __init__(self, model_endpoint: ModelEndpoint, task: Task = None, server_config: dict = None): + def __init__(self, model_endpoint: ModelEndpoint, task: Task = None): super(SKLearnPreprocessRequest, self).__init__( - model_endpoint=model_endpoint, task=task, server_config=server_config) + model_endpoint=model_endpoint, task=task) if self._model is None: # get model - import joblib + import joblib # noqa self._model = joblib.load(filename=self._get_local_model_file()) - def process(self, data: Any) -> Any: + def process(self, data: Any, collect_custom_statistics_fn: Callable[[dict], None] = None) -> Any: """ The actual processing function. 
We run the model in this context @@ -273,16 +322,16 @@ class SKLearnPreprocessRequest(BasePreprocessRequest): @BasePreprocessRequest.register_engine("xgboost", modules=["xgboost"]) class XGBoostPreprocessRequest(BasePreprocessRequest): - def __init__(self, model_endpoint: ModelEndpoint, task: Task = None, server_config: dict = None): + def __init__(self, model_endpoint: ModelEndpoint, task: Task = None): super(XGBoostPreprocessRequest, self).__init__( - model_endpoint=model_endpoint, task=task, server_config=server_config) + model_endpoint=model_endpoint, task=task) if self._model is None: # get model - import xgboost + import xgboost # noqa self._model = xgboost.Booster() self._model.load_model(self._get_local_model_file()) - def process(self, data: Any) -> Any: + def process(self, data: Any, collect_custom_statistics_fn: Callable[[dict], None] = None) -> Any: """ The actual processing function. We run the model in this context @@ -292,15 +341,15 @@ class XGBoostPreprocessRequest(BasePreprocessRequest): @BasePreprocessRequest.register_engine("lightgbm", modules=["lightgbm"]) class LightGBMPreprocessRequest(BasePreprocessRequest): - def __init__(self, model_endpoint: ModelEndpoint, task: Task = None, server_config: dict = None): + def __init__(self, model_endpoint: ModelEndpoint, task: Task = None): super(LightGBMPreprocessRequest, self).__init__( - model_endpoint=model_endpoint, task=task, server_config=server_config) + model_endpoint=model_endpoint, task=task) if self._model is None: # get model - import lightgbm + import lightgbm # noqa self._model = lightgbm.Booster(model_file=self._get_local_model_file()) - def process(self, data: Any) -> Any: + def process(self, data: Any, collect_custom_statistics_fn: Callable[[dict], None] = None) -> Any: """ The actual processing function. 
We run the model in this context @@ -310,15 +359,15 @@ class LightGBMPreprocessRequest(BasePreprocessRequest): @BasePreprocessRequest.register_engine("custom") class CustomPreprocessRequest(BasePreprocessRequest): - def __init__(self, model_endpoint: ModelEndpoint, task: Task = None, server_config: dict = None): + def __init__(self, model_endpoint: ModelEndpoint, task: Task = None): super(CustomPreprocessRequest, self).__init__( - model_endpoint=model_endpoint, task=task, server_config=server_config) + model_endpoint=model_endpoint, task=task) - def process(self, data: Any) -> Any: + def process(self, data: Any, collect_custom_statistics_fn: Callable[[dict], None] = None) -> Any: """ The actual processing function. We run the process in this context """ if self._preprocess is not None and hasattr(self._preprocess, 'process'): - return self._preprocess.process(data) + return self._preprocess.process(data, collect_custom_statistics_fn) return None diff --git a/clearml_serving/serving/requirements.txt b/clearml_serving/serving/requirements.txt index 8ab970d..bf350bc 100644 --- a/clearml_serving/serving/requirements.txt +++ b/clearml_serving/serving/requirements.txt @@ -1,17 +1,17 @@ -clearml >= 1.1.6 -attrs -fastapi[all] +clearml>=1.3.1 +attrs>=20.3.0,<21 +fastapi[all]>=0.75.0,<0.76 uvicorn[standard] -gunicorn -pyzmq -asyncio -aiocache -tritonclient[grpc] -numpy -pandas -scikit-learn +gunicorn>=20.1.0,<20.2 +asyncio>=3.4.3,<3.5 +aiocache>=0.11.1,<0.12 +tritonclient[grpc]>=2.18.0,<2.19 +numpy>=1.20,<1.24 +scikit-learn>=1.0.2,<1.1 grpcio -Pillow -xgboost -lightgbm -requests +Pillow>=9.0.1,<10 +xgboost>=1.5.2,<1.6 +lightgbm>=3.3.2,<3.4 +requests>=2.25.1,<2.26 +kafka-python>=2.0.2,<2.1 +lz4>=4.0.0,<5 \ No newline at end of file diff --git a/clearml_serving/statistics/Dockerfile b/clearml_serving/statistics/Dockerfile new file mode 100644 index 0000000..e4e692d --- /dev/null +++ b/clearml_serving/statistics/Dockerfile @@ -0,0 +1,21 @@ +FROM python:3.9-bullseye + + +ENV 
LC_ALL=C.UTF-8 + +# install base package +RUN pip3 install clearml-serving + +# get latest execution code from the git repository +# RUN cd $HOME && git clone https://github.com/allegroai/clearml-serving.git +COPY clearml_serving /root/clearml/clearml_serving + +RUN pip3 install -r /root/clearml/clearml_serving/statistics/requirements.txt + +# default serving port +EXPOSE 9999 + +# environement variable to load Task from CLEARML_SERVING_TASK_ID, CLEARML_SERVING_PORT + +WORKDIR /root/clearml/ +ENTRYPOINT ["clearml_serving/statistics/entrypoint.sh"] diff --git a/clearml_serving/statistics/__init__.py b/clearml_serving/statistics/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/clearml_serving/statistics/entrypoint.sh b/clearml_serving/statistics/entrypoint.sh new file mode 100755 index 0000000..2ed724b --- /dev/null +++ b/clearml_serving/statistics/entrypoint.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# print configuration +echo CLEARML_SERVING_TASK_ID="$CLEARML_SERVING_TASK_ID" +echo CLEARML_SERVING_PORT="$CLEARML_SERVING_PORT" +echo EXTRA_PYTHON_PACKAGES="$EXTRA_PYTHON_PACKAGES" +echo CLEARML_SERVING_POLL_FREQ="$CLEARML_SERVING_POLL_FREQ" +echo CLEARML_DEFAULT_KAFKA_SERVE_URL="$CLEARML_DEFAULT_KAFKA_SERVE_URL" + +SERVING_PORT="${CLEARML_SERVING_PORT:-9999}" + +# set default internal serve endpoint (for request pipelining) +CLEARML_DEFAULT_BASE_SERVE_URL="${CLEARML_DEFAULT_BASE_SERVE_URL:-http://127.0.0.1:$SERVING_PORT/serve}" +CLEARML_DEFAULT_TRITON_GRPC_ADDR="${CLEARML_DEFAULT_TRITON_GRPC_ADDR:-127.0.0.1:8001}" + +# print configuration +echo SERVING_PORT="$SERVING_PORT" + +# runtime add extra python packages +if [ ! 
-z "$EXTRA_PYTHON_PACKAGES" ] +then + python3 -m pip install $EXTRA_PYTHON_PACKAGES +fi + +echo "Starting Statistics Controller server" +PYTHONPATH=$(pwd) python3 clearml_serving/statistics/main.py diff --git a/clearml_serving/statistics/main.py b/clearml_serving/statistics/main.py new file mode 100644 index 0000000..eef9fcb --- /dev/null +++ b/clearml_serving/statistics/main.py @@ -0,0 +1,41 @@ +import os + +import prometheus_client +from clearml import Task + +from clearml_serving.serving.model_request_processor import ModelRequestProcessor +from clearml_serving.statistics.metrics import StatisticsController + + +def main(): + serving_service_task_id = os.environ.get("CLEARML_SERVING_TASK_ID", None) + model_sync_frequency_secs = 5 + try: + model_sync_frequency_secs = float(os.environ.get("CLEARML_SERVING_POLL_FREQ", model_sync_frequency_secs)) + except (ValueError, TypeError): + pass + + # noinspection PyProtectedMember + serving_task = ModelRequestProcessor._get_control_plane_task(task_id=serving_service_task_id) + # create a new serving instance (for visibility and monitoring) + instance_task = Task.init( + project_name=serving_task.get_project_name(), + task_name="{} - statistics controller".format(serving_task.name), + task_type="monitor", + ) + instance_task.set_system_tags(["service"]) + # noinspection PyProtectedMember + kafka_server_url = os.environ.get("CLEARML_DEFAULT_KAFKA_SERVE_URL", "localhost:9092") + stats_controller = StatisticsController( + task=instance_task, + kafka_server_url=kafka_server_url, + serving_id=serving_service_task_id, + poll_frequency_min=model_sync_frequency_secs + ) + prometheus_client.start_http_server(int(os.environ.get("CLEARML_SERVING_PORT", 9999))) + # we will never leave here + stats_controller.start() + + +if __name__ == '__main__': + main() diff --git a/clearml_serving/statistics/metrics.py b/clearml_serving/statistics/metrics.py new file mode 100644 index 0000000..a0d35db --- /dev/null +++ 
b/clearml_serving/statistics/metrics.py @@ -0,0 +1,352 @@ +import json +import os +import re +from copy import deepcopy +from functools import partial +from threading import Event, Thread +from time import time, sleep + +from clearml import Task +from typing import Optional, Dict, Any, Iterable + +from prometheus_client import Histogram, Enum, Gauge, Counter, values +from kafka import KafkaConsumer +from prometheus_client.metrics import MetricWrapperBase, _validate_exemplar +from prometheus_client.registry import REGISTRY +from prometheus_client.samples import Exemplar, Sample +from prometheus_client.context_managers import Timer +from prometheus_client.utils import floatToGoString + +from ..serving.endpoints import EndpointMetricLogging +from ..serving.model_request_processor import ModelRequestProcessor + + +class ScalarHistogram(Histogram): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def observe(self, amount, exemplar=None): + """Observe the given amount. + + The amount is usually positive or zero. Negative values are + accepted but prevent current versions of Prometheus from + properly detecting counter resets in the sum of + observations. See + https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations + for details. 
+ """ + self._raise_if_not_observable() + self._sum.inc(1) + for i, bound in enumerate(self._upper_bounds): + if amount <= bound: + self._buckets[i].inc(1) + if exemplar: + _validate_exemplar(exemplar) + self._buckets[i].set_exemplar(Exemplar(exemplar, amount, time())) + break + + def _child_samples(self) -> Iterable[Sample]: + samples = [] + for i, bound in enumerate(self._upper_bounds): + acc = self._buckets[i].get() + samples.append( + Sample('_bucket', {'le': floatToGoString(bound)}, acc, None, self._buckets[i].get_exemplar()) + ) + samples.append(Sample('_sum', {'le': floatToGoString(bound)}, self._sum.get(), None, None)) + + return tuple(samples) + + +class EnumHistogram(MetricWrapperBase): + """A Histogram tracks the size and number of events in buckets. + + You can use Histograms for aggregatable calculation of quantiles. + + Example use cases: + - Response latency + - Request size + + Example for a Histogram: + + from prometheus_client import Histogram + + h = Histogram('request_size_bytes', 'Request size (bytes)') + h.observe(512) # Observe 512 (bytes) + + Example for a Histogram using time: + + from prometheus_client import Histogram + + REQUEST_TIME = Histogram('response_latency_seconds', 'Response latency (seconds)') + + @REQUEST_TIME.time() + def create_response(request): + '''A dummy function''' + time.sleep(1) + + Example of using the same Histogram object as a context manager: + + with REQUEST_TIME.time(): + pass # Logic to be timed + + The default buckets are intended to cover a typical web/rpc request from milliseconds to seconds. + They can be overridden by passing `buckets` keyword argument to `Histogram`. 
+ """ + _type = 'histogram' + + def __init__(self, + name, + documentation, + buckets, + labelnames=(), + namespace='', + subsystem='', + unit='', + registry=REGISTRY, + _labelvalues=None, + ): + self._prepare_buckets(buckets) + super().__init__( + name=name, + documentation=documentation, + labelnames=labelnames, + namespace=namespace, + subsystem=subsystem, + unit=unit, + registry=registry, + _labelvalues=_labelvalues, + ) + self._kwargs['buckets'] = buckets + + def _prepare_buckets(self, buckets): + buckets = [str(b) for b in buckets] + if buckets != sorted(buckets): + # This is probably an error on the part of the user, + # so raise rather than sorting for them. + raise ValueError('Buckets not in sorted order') + + if len(buckets) < 2: + raise ValueError('Must have at least two buckets') + self._upper_bounds = buckets + + def _metric_init(self): + self._buckets = {} + self._created = time() + bucket_labelnames = self._upper_bounds + self._sum = values.ValueClass( + self._type, self._name, self._name + '_sum', self._labelnames, self._labelvalues) + for b in self._upper_bounds: + self._buckets[b] = values.ValueClass( + self._type, + self._name, + self._name + '_bucket', + bucket_labelnames, + self._labelvalues + (b,)) + + def observe(self, amount, exemplar=None): + """Observe the given amount. + + The amount is usually positive or zero. Negative values are + accepted but prevent current versions of Prometheus from + properly detecting counter resets in the sum of + observations. See + https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations + for details. + """ + self._raise_if_not_observable() + if not isinstance(amount, (list, tuple)): + amount = [amount] + self._sum.inc(len(amount)) + for v in amount: + self._buckets[v].inc(1) + if exemplar: + _validate_exemplar(exemplar) + self._buckets[v].set_exemplar(Exemplar(exemplar, 1, time())) + + def time(self): + """Time a block of code or function, and observe the duration in seconds. 
+ + Can be used as a function decorator or context manager. + """ + return Timer(self, 'observe') + + def _child_samples(self) -> Iterable[Sample]: + samples = [] + for i in self._buckets: + acc = self._buckets[i].get() + samples.append(Sample( + '_bucket', {'enum': i}, acc, None, self._buckets[i].get_exemplar())) + samples.append(Sample('_sum', {'enum': i}, self._sum.get(), None, None)) + + return tuple(samples) + + +class StatisticsController(object): + _reserved = { + '_latency': partial(ScalarHistogram, buckets=(.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0)), + '_count': Counter + } + _metric_type_class = {"scalar": ScalarHistogram, "enum": EnumHistogram, "value": Gauge, "counter": Counter} + + def __init__( + self, + task: Task, + kafka_server_url: str, + serving_id: Optional[str], + poll_frequency_min: float = 5 + ): + self.task = task + self._serving_service_task_id = serving_id + self._poll_frequency_min = float(poll_frequency_min) + self._serving_service = None # type: Optional[ModelRequestProcessor] + self._current_endpoints = {} # type: Optional[Dict[str, EndpointMetricLogging]] + self._prometheus_metrics = {} # type: Optional[Dict[str, Dict[str, MetricWrapperBase]]] + self._timestamp = time() + self._sync_thread = None + self._last_sync_time = time() + self._dirty = False + self._sync_event = Event() + self._sync_threshold_sec = 30 + self._kafka_server = kafka_server_url + # noinspection PyProtectedMember + self._kafka_topic = ModelRequestProcessor._kafka_topic + + def start(self): + self._serving_service = ModelRequestProcessor(task_id=self._serving_service_task_id) + + if not self._sync_thread: + self._sync_thread = Thread(target=self._sync_daemon, daemon=True) + self._sync_thread.start() + + # noinspection PyProtectedMember + kafka_server = \ + self._serving_service.get_configuration().get(ModelRequestProcessor._config_key_kafka_stats) or \ + self._kafka_server + + print("Starting Kafka Statistics processing: {}".format(kafka_server)) 
+ + while True: + try: + consumer = KafkaConsumer(self._kafka_topic, bootstrap_servers=kafka_server) + break + except Exception as ex: + print("Error: failed opening Kafka consumer [{}]: {}".format(kafka_server, ex)) + print("Retrying in 30 seconds") + sleep(30) + + # we will never leave this loop + for message in consumer: + # noinspection PyBroadException + try: + data = json.loads(message.value.decode("utf-8")) + except Exception: + print("Warning: failed to decode kafka stats message") + continue + try: + url = data.pop("_url", None) + if not url: + # should not happen + continue + endpoint_metric = self._current_endpoints.get(url) + if not endpoint_metric: + # add default one, we will just log the reserved valued: + endpoint_metric = dict() + self._current_endpoints[url] = EndpointMetricLogging(endpoint=url) + # we should sync, + if time()-self._last_sync_time > self._sync_threshold_sec: + self._last_sync_time = time() + self._sync_event.set() + + metric_url_log = self._prometheus_metrics.get(url) + if not metric_url_log: + # create a new one + metric_url_log = dict() + self._prometheus_metrics[url] = metric_url_log + + # check if we have the prometheus_logger + for k, v in data.items(): + prometheus_logger = metric_url_log.get(k) + if not prometheus_logger: + prometheus_logger = self._create_prometheus_logger_class(url, k, endpoint_metric) + if not prometheus_logger: + continue + metric_url_log[k] = prometheus_logger + + self._report_value(prometheus_logger, v) + + except Exception as ex: + print("Warning: failed to report stat to Prometheus: {}".format(ex)) + continue + + @staticmethod + def _report_value(prometheus_logger: Optional[MetricWrapperBase], v: Any) -> bool: + if not prometheus_logger: + # this means no one configured the variable to log + return False + elif isinstance(prometheus_logger, (Histogram, EnumHistogram)): + prometheus_logger.observe(amount=v) + elif isinstance(prometheus_logger, Gauge): + prometheus_logger.set(value=v) + elif 
isinstance(prometheus_logger, Counter): + prometheus_logger.inc(amount=v) + elif isinstance(prometheus_logger, Enum): + prometheus_logger.state(state=v) + else: + # we should not get here + return False + + return True + + def _create_prometheus_logger_class( + self, + url: str, + variable_name: str, + endpoint_config: EndpointMetricLogging + ) -> Optional[MetricWrapperBase]: + reserved_cls = self._reserved.get(variable_name) + name = "{}:{}".format(url, variable_name) + name = re.sub(r"[^(a-zA-Z0-9_:)]", "_", name) + if reserved_cls: + return reserved_cls(name=name, documentation="Built in {}".format(variable_name)) + + if not endpoint_config: + # we should not end up here + return None + + metric_ = endpoint_config.metrics.get(variable_name) + if not metric_: + return None + metric_cls = self._metric_type_class.get(metric_.type) + if not metric_cls: + return None + if metric_cls in (Histogram, EnumHistogram): + return metric_cls( + name=name, + documentation="User defined metric {}".format(metric_.type), + buckets=metric_.buckets + ) + return metric_cls(name=name, documentation="User defined metric {}".format(metric_.type)) + + def _sync_daemon(self): + self._last_sync_time = time() + poll_freq_sec = self._poll_frequency_min*60 + print("Instance [{}, pid={}]: Launching - configuration sync every {} sec".format( + self.task.id, os.getpid(), poll_freq_sec)) + while True: + try: + self._serving_service.deserialize() + endpoint_metrics = self._serving_service.list_endpoint_logging() + self._last_sync_time = time() + if self._current_endpoints == endpoint_metrics: + self._sync_event.wait(timeout=poll_freq_sec) + self._sync_event.clear() + continue + + # update metrics: + self._dirty = True + self._current_endpoints = deepcopy(endpoint_metrics) + print("New configuration synced") + except Exception as ex: + print("Warning: failed to sync state from serving service Task: {}".format(ex)) + continue diff --git a/clearml_serving/statistics/requirements.txt 
b/clearml_serving/statistics/requirements.txt new file mode 100644 index 0000000..1f153d9 --- /dev/null +++ b/clearml_serving/statistics/requirements.txt @@ -0,0 +1,6 @@ +clearml>=1.3.1 +numpy>=1.20,<1.24 +requests>=2.25.1,<2.26 +kafka-python>=2.0.2,<2.1 +prometheus_client>=0.13.1,<0.14 +lz4>=4.0.0,<5 diff --git a/docker/datasource.yml b/docker/datasource.yml new file mode 100644 index 0000000..1e91d21 --- /dev/null +++ b/docker/datasource.yml @@ -0,0 +1,8 @@ +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + # Access mode - proxy (server in the UI) or direct (browser in the UI). + access: proxy + url: http://clearml-serving-prometheus:9090 diff --git a/docker/docker-compose-triton-gpu.yml b/docker/docker-compose-triton-gpu.yml new file mode 100644 index 0000000..e354be5 --- /dev/null +++ b/docker/docker-compose-triton-gpu.yml @@ -0,0 +1,151 @@ +version: "3" + +services: + zookeeper: + image: bitnami/zookeeper:3.7.0 + container_name: clearml-serving-zookeeper + # ports: + # - "2181:2181" + environment: + - ALLOW_ANONYMOUS_LOGIN=yes + networks: + - clearml-serving-backend + + kafka: + image: bitnami/kafka:3.1.0 + container_name: clearml-serving-kafka + # ports: + # - "9092:9092" + environment: + - KAFKA_BROKER_ID=1 + - KAFKA_CFG_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 + - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 + - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 + - ALLOW_PLAINTEXT_LISTENER=yes + - KAFKA_CREATE_TOPICS="topic_test:1:1" + depends_on: + - zookeeper + networks: + - clearml-serving-backend + + prometheus: + image: prom/prometheus:v2.34.0 + container_name: clearml-serving-prometheus + volumes: + - ./prometheus.yml:/prometheus.yml + command: + - '--config.file=/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/etc/prometheus/console_libraries' + - '--web.console.templates=/etc/prometheus/consoles' + - '--storage.tsdb.retention.time=200h' + - '--web.enable-lifecycle' + 
restart: unless-stopped + # ports: + # - "9090:9090" + depends_on: + - clearml-serving-statistics + networks: + - clearml-serving-backend + + alertmanager: + image: prom/alertmanager:v0.23.0 + container_name: clearml-serving-alertmanager + restart: unless-stopped + # ports: + # - "9093:9093" + depends_on: + - prometheus + - grafana + networks: + - clearml-serving-backend + + grafana: + image: grafana/grafana:8.4.4-ubuntu + container_name: clearml-serving-grafana + volumes: + - './datasource.yml:/etc/grafana/provisioning/datasources/datasource.yaml' + restart: unless-stopped + ports: + - "3000:3000" + depends_on: + - prometheus + networks: + - clearml-serving-backend + + + clearml-serving-inference: + image: allegroai/clearml-serving-inference:latest + container_name: clearml-serving-inference + restart: unless-stopped + ports: + - "8080:8080" + environment: + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-https://app.clear.ml} + CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} + CLEARML_SERVING_PORT: ${CLEARML_SERVING_PORT:-8080} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} + CLEARML_DEFAULT_BASE_SERVE_URL: ${CLEARML_DEFAULT_BASE_SERVE_URL:-"http://127.0.0.1:8080/serve"} + CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} + CLEARML_DEFAULT_TRITON_GRPC_ADDR: ${CLEARML_DEFAULT_TRITON_GRPC_ADDR:-"clearml-serving-triton:8001"} + CLEARML_USE_GUNICORN: ${CLEARML_USE_GUNICORN} + CLEARML_SERVING_NUM_PROCESS: ${CLEARML_SERVING_NUM_PROCESS} + depends_on: + - kafka + - clearml-serving-triton + networks: + - clearml-serving-backend + + clearml-serving-triton: + image: allegroai/clearml-serving-triton:latest + container_name: clearml-serving-triton + restart: unless-stopped + # 
ports: + # - "8001:8001" + environment: + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-"https://app.clear.ml"} + CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} + CLEARML_TRITON_METRIC_FREQ: $CLEARML_TRITON_METRIC_FREQ:-1} + depends_on: + - kafka + networks: + - clearml-serving-backend + deploy: + resources: + reservations: + devices: + - capabilities: [gpu] + + clearml-serving-statistics: + image: allegroai/clearml-serving-statistics:latest + container_name: clearml-serving-statistics + restart: unless-stopped + # ports: + # - "9999:9999" + environment: + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-"https://app.clear.ml"} + CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} + CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} + depends_on: + - kafka + networks: + - clearml-serving-backend + + +networks: + clearml-serving-backend: + driver: bridge diff --git a/docker/docker-compose-triton.yml b/docker/docker-compose-triton.yml new file mode 100644 index 0000000..05077a6 --- /dev/null +++ b/docker/docker-compose-triton.yml @@ -0,0 +1,146 @@ +version: "3" + +services: + zookeeper: + image: bitnami/zookeeper:3.7.0 + container_name: clearml-serving-zookeeper + # ports: + # - "2181:2181" + environment: + - ALLOW_ANONYMOUS_LOGIN=yes + networks: + - clearml-serving-backend + + kafka: + image: bitnami/kafka:3.1.0 + container_name: 
clearml-serving-kafka + # ports: + # - "9092:9092" + environment: + - KAFKA_BROKER_ID=1 + - KAFKA_CFG_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 + - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 + - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 + - ALLOW_PLAINTEXT_LISTENER=yes + - KAFKA_CREATE_TOPICS="topic_test:1:1" + depends_on: + - zookeeper + networks: + - clearml-serving-backend + + prometheus: + image: prom/prometheus:v2.34.0 + container_name: clearml-serving-prometheus + volumes: + - ./prometheus.yml:/prometheus.yml + command: + - '--config.file=/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/etc/prometheus/console_libraries' + - '--web.console.templates=/etc/prometheus/consoles' + - '--storage.tsdb.retention.time=200h' + - '--web.enable-lifecycle' + restart: unless-stopped + # ports: + # - "9090:9090" + depends_on: + - clearml-serving-statistics + networks: + - clearml-serving-backend + + alertmanager: + image: prom/alertmanager:v0.23.0 + container_name: clearml-serving-alertmanager + restart: unless-stopped + # ports: + # - "9093:9093" + depends_on: + - prometheus + - grafana + networks: + - clearml-serving-backend + + grafana: + image: grafana/grafana:8.4.4-ubuntu + container_name: clearml-serving-grafana + volumes: + - './datasource.yml:/etc/grafana/provisioning/datasources/datasource.yaml' + restart: unless-stopped + ports: + - "3000:3000" + depends_on: + - prometheus + networks: + - clearml-serving-backend + + + clearml-serving-inference: + image: allegroai/clearml-serving-inference:latest + container_name: clearml-serving-inference + restart: unless-stopped + ports: + - "8080:8080" + environment: + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-https://app.clear.ml} + CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} + CLEARML_API_SECRET_KEY: 
${CLEARML_API_SECRET_KEY:-} + CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} + CLEARML_SERVING_PORT: ${CLEARML_SERVING_PORT:-8080} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} + CLEARML_DEFAULT_BASE_SERVE_URL: ${CLEARML_DEFAULT_BASE_SERVE_URL:-"http://127.0.0.1:8080/serve"} + CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} + CLEARML_DEFAULT_TRITON_GRPC_ADDR: ${CLEARML_DEFAULT_TRITON_GRPC_ADDR:-"clearml-serving-triton:8001"} + CLEARML_USE_GUNICORN: ${CLEARML_USE_GUNICORN} + CLEARML_SERVING_NUM_PROCESS: ${CLEARML_SERVING_NUM_PROCESS} + depends_on: + - kafka + - clearml-serving-triton + networks: + - clearml-serving-backend + + clearml-serving-triton: + image: allegroai/clearml-serving-triton:latest + container_name: clearml-serving-triton + restart: unless-stopped + # ports: + # - "8001:8001" + environment: + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-"https://app.clear.ml"} + CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} + CLEARML_TRITON_METRIC_FREQ: $CLEARML_TRITON_METRIC_FREQ:-1} + depends_on: + - kafka + networks: + - clearml-serving-backend + + clearml-serving-statistics: + image: allegroai/clearml-serving-statistics:latest + container_name: clearml-serving-statistics + restart: unless-stopped + # ports: + # - "9999:9999" + environment: + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-"https://app.clear.ml"} + CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_SERVING_TASK_ID: 
${CLEARML_SERVING_TASK_ID:-} + CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} + depends_on: + - kafka + networks: + - clearml-serving-backend + + +networks: + clearml-serving-backend: + driver: bridge diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml new file mode 100644 index 0000000..9a98e08 --- /dev/null +++ b/docker/docker-compose.yml @@ -0,0 +1,125 @@ +version: "3" + +services: + zookeeper: + image: bitnami/zookeeper:3.7.0 + container_name: clearml-serving-zookeeper + # ports: + # - "2181:2181" + environment: + - ALLOW_ANONYMOUS_LOGIN=yes + networks: + - clearml-serving-backend + + kafka: + image: bitnami/kafka:3.1.0 + container_name: clearml-serving-kafka + # ports: + # - "9092:9092" + environment: + - KAFKA_BROKER_ID=1 + - KAFKA_CFG_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 + - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 + - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 + - ALLOW_PLAINTEXT_LISTENER=yes + - KAFKA_CREATE_TOPICS="topic_test:1:1" + depends_on: + - zookeeper + networks: + - clearml-serving-backend + + prometheus: + image: prom/prometheus:v2.34.0 + container_name: clearml-serving-prometheus + volumes: + - ./prometheus.yml:/prometheus.yml + command: + - '--config.file=/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/etc/prometheus/console_libraries' + - '--web.console.templates=/etc/prometheus/consoles' + - '--storage.tsdb.retention.time=200h' + - '--web.enable-lifecycle' + restart: unless-stopped + # ports: + # - "9090:9090" + depends_on: + - clearml-serving-statistics + networks: + - clearml-serving-backend + + alertmanager: + image: prom/alertmanager:v0.23.0 + container_name: clearml-serving-alertmanager + restart: unless-stopped + # ports: + # - "9093:9093" + depends_on: + - prometheus + - grafana + networks: + - clearml-serving-backend + + grafana: + 
image: grafana/grafana:8.4.4-ubuntu + container_name: clearml-serving-grafana + volumes: + - './datasource.yml:/etc/grafana/provisioning/datasources/datasource.yaml' + restart: unless-stopped + ports: + - "3000:3000" + depends_on: + - prometheus + networks: + - clearml-serving-backend + + + clearml-serving-inference: + image: allegroai/clearml-serving-inference:latest + container_name: clearml-serving-inference + restart: unless-stopped + ports: + - "8080:8080" + environment: + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-https://app.clear.ml} + CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} + CLEARML_SERVING_PORT: ${CLEARML_SERVING_PORT:-8080} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} + CLEARML_DEFAULT_BASE_SERVE_URL: ${CLEARML_DEFAULT_BASE_SERVE_URL:-"http://127.0.0.1:8080/serve"} + CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} + CLEARML_DEFAULT_TRITON_GRPC_ADDR: ${CLEARML_DEFAULT_TRITON_GRPC_ADDR} + CLEARML_USE_GUNICORN: ${CLEARML_USE_GUNICORN} + CLEARML_SERVING_NUM_PROCESS: ${CLEARML_SERVING_NUM_PROCESS} + depends_on: + - kafka + networks: + - clearml-serving-backend + + clearml-serving-statistics: + image: allegroai/clearml-serving-statistics:latest + container_name: clearml-serving-statistics + restart: unless-stopped + # ports: + # - "9999:9999" + environment: + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-"https://app.clear.ml"} + CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} + CLEARML_DEFAULT_KAFKA_SERVE_URL: 
${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} + depends_on: + - kafka + networks: + - clearml-serving-backend + + +networks: + clearml-serving-backend: + driver: bridge diff --git a/docker/example.env b/docker/example.env new file mode 100644 index 0000000..8b38660 --- /dev/null +++ b/docker/example.env @@ -0,0 +1,6 @@ +CLEARML_WEB_HOST="https://app.clear.ml" +CLEARML_API_HOST="https://api.clear.ml" +CLEARML_FILES_HOST="https://files.clear.ml" +CLEARML_API_ACCESS_KEY="" +CLEARML_API_SECRET_KEY="" +CLEARML_SERVING_TASK_ID="" diff --git a/docker/prometheus.yml b/docker/prometheus.yml new file mode 100644 index 0000000..469e220 --- /dev/null +++ b/docker/prometheus.yml @@ -0,0 +1,22 @@ +global: + scrape_interval: 15s # By default, scrape targets every 15 seconds. + evaluation_interval: 15s # By default, evaluate rules every 15 seconds. + external_labels: + monitor: 'clearml-serving' + +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'prometheus' + + scrape_interval: 5s + + static_configs: + - targets: ['localhost:9090'] + + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'clearml-inference-stats' + + scrape_interval: 5s + + static_configs: + - targets: ['clearml-serving-statistics:9999'] diff --git a/examples/ensemble/preprocess.py b/examples/ensemble/preprocess.py index 079299a..6ba648c 100644 --- a/examples/ensemble/preprocess.py +++ b/examples/ensemble/preprocess.py @@ -9,11 +9,11 @@ class Preprocess(object): # set internal state, this will be called only once. (i.e.
not per request) pass - def preprocess(self, body: dict) -> Any: + def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any: # we expect to get two valid on the dict x0, and x1 return [[body.get("x0", None), body.get("x1", None)], ] - def postprocess(self, data: Any) -> dict: + def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict: # post process the data returned from the model inference engine # data is the return value from model.predict we will put is inside a return value as Y return dict(y=data.tolist() if isinstance(data, np.ndarray) else data) diff --git a/examples/ensemble/readme.md b/examples/ensemble/readme.md index e3cac56..68a80d4 100644 --- a/examples/ensemble/readme.md +++ b/examples/ensemble/readme.md @@ -29,3 +29,4 @@ Or add Canary endpoint > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. +By default, new endpoints/models will be automatically updated after 1 minute. diff --git a/examples/keras/preprocess.py b/examples/keras/preprocess.py index e7d6bac..b87d3e8 100644 --- a/examples/keras/preprocess.py +++ b/examples/keras/preprocess.py @@ -13,7 +13,7 @@ class Preprocess(object): # set internal state, this will be called only once. (i.e.
not per request) pass - def preprocess(self, body: dict) -> Any: + def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any: # we expect to get two valid on the dict x0, and x1 url = body.get("url") if not url: @@ -25,7 +25,7 @@ class Preprocess(object): return np.array(image).flatten() - def postprocess(self, data: Any) -> dict: + def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict: # post process the data returned from the model inference engine # data is the return value from model.predict we will put is inside a return value as Y if not isinstance(data, np.ndarray): diff --git a/examples/keras/readme.md b/examples/keras/readme.md index f8a6904..5a9f84c 100644 --- a/examples/keras/readme.md +++ b/examples/keras/readme.md @@ -12,6 +12,8 @@ The output will be a model created on the project "serving examples", by the nam ## setting up the serving service +Prerequisites, Keras/Tensorflow models require Triton engine support, please use `docker-compose-triton.yml` / `docker-compose-triton-gpu.yml` or if running on Kubernetes, the matching helm chart. + 1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) 2. Create model endpoint: @@ -36,3 +38,4 @@ Or add Canary endpoint > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. +by default new endpoints/models will be automatically updated after 1 minute \ No newline at end of file diff --git a/examples/lightgbm/preprocess.py b/examples/lightgbm/preprocess.py index 5d7ebe7..e89f563 100644 --- a/examples/lightgbm/preprocess.py +++ b/examples/lightgbm/preprocess.py @@ -9,14 +9,14 @@ class Preprocess(object): # set internal state, this will be called only once. (i.e. 
not per request) pass - def preprocess(self, body: dict) -> Any: + def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any: # we expect to get four valid numbers on the dict: x0, x1, x2, x3 return np.array( [[body.get("x0", None), body.get("x1", None), body.get("x2", None), body.get("x3", None)], ], dtype=np.float32 ) - def postprocess(self, data: Any) -> dict: + def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict: # post process the data returned from the model inference engine # data is the return value from model.predict we will put is inside a return value as Y # we pick the most probably class and return the class index (argmax) diff --git a/examples/pipeline/preprocess.py b/examples/pipeline/preprocess.py index 07598e5..bcfd8a1 100644 --- a/examples/pipeline/preprocess.py +++ b/examples/pipeline/preprocess.py @@ -7,14 +7,14 @@ class Preprocess(object): # set internal state, this will be called only once. (i.e. not per request) pass - def postprocess(self, data: List[dict]) -> dict: + def postprocess(self, data: List[dict], collect_custom_statistics_fn=None) -> dict: # we will here average the results and return the new value # assume data is a list of dicts greater than 1 # average result return dict(y=0.5 * data[0]['y'][0] + 0.5 * data[1]['y'][0]) - def process(self, data: Any) -> Any: + def process(self, data: Any, collect_custom_statistics_fn=None) -> Any: """ do something with the actual data, return any type of object. The returned object will be passed as is to the postprocess function engine diff --git a/examples/pipeline/readme.md b/examples/pipeline/readme.md index 8b51268..1a25846 100644 --- a/examples/pipeline/readme.md +++ b/examples/pipeline/readme.md @@ -24,3 +24,4 @@ Training a scikit-learn model (see example/sklearn) > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. 
+by default new endpoints/models will be automatically updated after 1 minute diff --git a/examples/pytorch/preprocess.py b/examples/pytorch/preprocess.py index 8a80002..75d6815 100644 --- a/examples/pytorch/preprocess.py +++ b/examples/pytorch/preprocess.py @@ -13,7 +13,7 @@ class Preprocess(object): # set internal state, this will be called only once. (i.e. not per request) pass - def preprocess(self, body: dict) -> Any: + def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any: # we expect to get two valid on the dict x0, and x1 url = body.get("url") if not url: @@ -24,7 +24,7 @@ class Preprocess(object): image = ImageOps.grayscale(image).resize((28, 28)) return np.array(image).flatten() - def postprocess(self, data: Any) -> dict: + def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict: # post process the data returned from the model inference engine # data is the return value from model.predict we will put is inside a return value as Y if not isinstance(data, np.ndarray): diff --git a/examples/pytorch/readme.md b/examples/pytorch/readme.md index 13d4579..0b1a064 100644 --- a/examples/pytorch/readme.md +++ b/examples/pytorch/readme.md @@ -13,6 +13,9 @@ The output will be a model created on the project "serving examples", by the nam ## setting up the serving service + +Prerequisites, PyTorch models require Triton engine support, please use `docker-compose-triton.yml` / `docker-compose-triton-gpu.yml` or if running on Kubernetes, the matching helm chart. + 1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) 2. Create model endpoint: @@ -39,4 +42,4 @@ Or add Canary endpoint > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. 
- +by default new endpoints/models will be automatically updated after 1 minute diff --git a/examples/sklearn/preprocess.py b/examples/sklearn/preprocess.py index 079299a..6ba648c 100644 --- a/examples/sklearn/preprocess.py +++ b/examples/sklearn/preprocess.py @@ -9,11 +9,11 @@ class Preprocess(object): # set internal state, this will be called only once. (i.e. not per request) pass - def preprocess(self, body: dict) -> Any: + def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any: # we expect to get two valid on the dict x0, and x1 return [[body.get("x0", None), body.get("x1", None)], ] - def postprocess(self, data: Any) -> dict: + def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict: # post process the data returned from the model inference engine # data is the return value from model.predict we will put is inside a return value as Y return dict(y=data.tolist() if isinstance(data, np.ndarray) else data) diff --git a/examples/sklearn/readme.md b/examples/sklearn/readme.md index 7e7acf7..33b802f 100644 --- a/examples/sklearn/readme.md +++ b/examples/sklearn/readme.md @@ -29,3 +29,4 @@ Or add Canary endpoint > **_Notice:_** You can also change the serving service while it is already running! This includes adding/removing endpoints, adding canary model routing etc. +by default new endpoints/models will be automatically updated after 1 minute diff --git a/examples/xgboost/preprocess.py b/examples/xgboost/preprocess.py index 48acf33..e3a1771 100644 --- a/examples/xgboost/preprocess.py +++ b/examples/xgboost/preprocess.py @@ -10,12 +10,12 @@ class Preprocess(object): # set internal state, this will be called only once. (i.e. 
not per request) pass - def preprocess(self, body: dict) -> Any: + def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any: # we expect to get four valid numbers on the dict: x0, x1, x2, x3 return xgb.DMatrix( [[body.get("x0", None), body.get("x1", None), body.get("x2", None), body.get("x3", None)]]) - def postprocess(self, data: Any) -> dict: + def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict: # post process the data returned from the model inference engine # data is the return value from model.predict we will put is inside a return value as Y return dict(y=data.tolist() if isinstance(data, np.ndarray) else data) diff --git a/setup.py b/setup.py index 8575ed3..b6d0f16 100644 --- a/setup.py +++ b/setup.py @@ -39,8 +39,8 @@ setup( long_description_content_type='text/markdown', # The project's main homepage. url='https://github.com/allegroai/clearml-serving.git', - author='Allegroai', - author_email='clearml@allegro.ai', + author='ClearML', + author_email='support@clear.ml', license='Apache License 2.0', classifiers=[ 'Development Status :: 4 - Beta', @@ -54,7 +54,6 @@ setup( 'Topic :: Software Development :: Version Control', 'Topic :: System :: Logging', 'Topic :: System :: Monitoring', - 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', From e0f18dc1f77a885e1bde8cf5ed73d8cc17732d8a Mon Sep 17 00:00:00 2001 From: allegroai Date: Mon, 21 Mar 2022 17:10:04 +0200 Subject: [PATCH 16/19] Fix metric scalar vector support --- clearml_serving/statistics/metrics.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/clearml_serving/statistics/metrics.py b/clearml_serving/statistics/metrics.py index a0d35db..befb49e 100644 --- a/clearml_serving/statistics/metrics.py +++ b/clearml_serving/statistics/metrics.py @@ -37,14 +37,17 @@ class ScalarHistogram(Histogram): for details. 
""" self._raise_if_not_observable() - self._sum.inc(1) - for i, bound in enumerate(self._upper_bounds): - if amount <= bound: - self._buckets[i].inc(1) - if exemplar: - _validate_exemplar(exemplar) - self._buckets[i].set_exemplar(Exemplar(exemplar, amount, time())) - break + if not isinstance(amount, (list, tuple)): + amount = [amount] + self._sum.inc(len(amount)) + for v in amount: + for i, bound in enumerate(self._upper_bounds): + if v <= bound: + self._buckets[i].inc(1) + if exemplar: + _validate_exemplar(exemplar) + self._buckets[i].set_exemplar(Exemplar(exemplar, v, time())) + break def _child_samples(self) -> Iterable[Sample]: samples = [] From a0ca23eccc7227f9c8c19ee3cefd5c7057ced62e Mon Sep 17 00:00:00 2001 From: allegroai Date: Mon, 21 Mar 2022 17:10:38 +0200 Subject: [PATCH 17/19] Fix docker-compose --- docker/docker-compose-triton-gpu.yml | 50 ++++++++++++++-------------- docker/docker-compose-triton.yml | 50 ++++++++++++++-------------- docker/docker-compose.yml | 36 ++++++++++---------- 3 files changed, 68 insertions(+), 68 deletions(-) diff --git a/docker/docker-compose-triton-gpu.yml b/docker/docker-compose-triton-gpu.yml index e354be5..073eee3 100644 --- a/docker/docker-compose-triton-gpu.yml +++ b/docker/docker-compose-triton-gpu.yml @@ -20,7 +20,7 @@ services: - KAFKA_BROKER_ID=1 - KAFKA_CFG_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 - - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 + - KAFKA_CFG_ZOOKEEPER_CONNECT=clearml-serving-zookeeper:2181 - ALLOW_PLAINTEXT_LISTENER=yes - KAFKA_CREATE_TOPICS="topic_test:1:1" depends_on: @@ -82,18 +82,18 @@ services: - "8080:8080" environment: CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-https://app.clear.ml} - CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} - CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} - CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} - CLEARML_API_SECRET_KEY: 
${CLEARML_API_SECRET_KEY:-} + CLEARML_API_HOST: ${CLEARML_API_HOST:-https://api.clear.ml} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-https://files.clear.ml} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY} CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} CLEARML_SERVING_PORT: ${CLEARML_SERVING_PORT:-8080} - CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} - CLEARML_DEFAULT_BASE_SERVE_URL: ${CLEARML_DEFAULT_BASE_SERVE_URL:-"http://127.0.0.1:8080/serve"} - CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} - CLEARML_DEFAULT_TRITON_GRPC_ADDR: ${CLEARML_DEFAULT_TRITON_GRPC_ADDR:-"clearml-serving-triton:8001"} - CLEARML_USE_GUNICORN: ${CLEARML_USE_GUNICORN} - CLEARML_SERVING_NUM_PROCESS: ${CLEARML_SERVING_NUM_PROCESS} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1.0} + CLEARML_DEFAULT_BASE_SERVE_URL: ${CLEARML_DEFAULT_BASE_SERVE_URL:-http://127.0.0.1:8080/serve} + CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-clearml-serving-kafka:9092} + CLEARML_DEFAULT_TRITON_GRPC_ADDR: ${CLEARML_DEFAULT_TRITON_GRPC_ADDR:-clearml-serving-triton:8001} + CLEARML_USE_GUNICORN: ${CLEARML_USE_GUNICORN:-} + CLEARML_SERVING_NUM_PROCESS: ${CLEARML_SERVING_NUM_PROCESS:-} depends_on: - kafka - clearml-serving-triton @@ -107,14 +107,14 @@ services: # ports: # - "8001:8001" environment: - CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-"https://app.clear.ml"} - CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} - CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} - CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} - CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-https://app.clear.ml} + CLEARML_API_HOST: ${CLEARML_API_HOST:-https://api.clear.ml} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-https://files.clear.ml} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY} + 
CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY} CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} - CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} - CLEARML_TRITON_METRIC_FREQ: $CLEARML_TRITON_METRIC_FREQ:-1} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1.0} + CLEARML_TRITON_METRIC_FREQ: ${CLEARML_TRITON_METRIC_FREQ:-1.0} depends_on: - kafka networks: @@ -132,14 +132,14 @@ services: # ports: # - "9999:9999" environment: - CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-"https://app.clear.ml"} - CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} - CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} - CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} - CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-https://app.clear.ml} + CLEARML_API_HOST: ${CLEARML_API_HOST:-https://api.clear.ml} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-https://files.clear.ml} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY} CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} - CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} - CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} + CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-clearml-serving-kafka:9092} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1.0} depends_on: - kafka networks: diff --git a/docker/docker-compose-triton.yml b/docker/docker-compose-triton.yml index 05077a6..f62b1c4 100644 --- a/docker/docker-compose-triton.yml +++ b/docker/docker-compose-triton.yml @@ -20,7 +20,7 @@ services: - KAFKA_BROKER_ID=1 - KAFKA_CFG_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 - - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 + - KAFKA_CFG_ZOOKEEPER_CONNECT=clearml-serving-zookeeper:2181 - ALLOW_PLAINTEXT_LISTENER=yes - 
KAFKA_CREATE_TOPICS="topic_test:1:1" depends_on: @@ -82,18 +82,18 @@ services: - "8080:8080" environment: CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-https://app.clear.ml} - CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} - CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} - CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} - CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_API_HOST: ${CLEARML_API_HOST:-https://api.clear.ml} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-https://files.clear.ml} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY} CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} CLEARML_SERVING_PORT: ${CLEARML_SERVING_PORT:-8080} - CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} - CLEARML_DEFAULT_BASE_SERVE_URL: ${CLEARML_DEFAULT_BASE_SERVE_URL:-"http://127.0.0.1:8080/serve"} - CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} - CLEARML_DEFAULT_TRITON_GRPC_ADDR: ${CLEARML_DEFAULT_TRITON_GRPC_ADDR:-"clearml-serving-triton:8001"} - CLEARML_USE_GUNICORN: ${CLEARML_USE_GUNICORN} - CLEARML_SERVING_NUM_PROCESS: ${CLEARML_SERVING_NUM_PROCESS} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1.0} + CLEARML_DEFAULT_BASE_SERVE_URL: ${CLEARML_DEFAULT_BASE_SERVE_URL:-http://127.0.0.1:8080/serve} + CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-clearml-serving-kafka:9092} + CLEARML_DEFAULT_TRITON_GRPC_ADDR: ${CLEARML_DEFAULT_TRITON_GRPC_ADDR:-clearml-serving-triton:8001} + CLEARML_USE_GUNICORN: ${CLEARML_USE_GUNICORN:-} + CLEARML_SERVING_NUM_PROCESS: ${CLEARML_SERVING_NUM_PROCESS:-} depends_on: - kafka - clearml-serving-triton @@ -107,14 +107,14 @@ services: # ports: # - "8001:8001" environment: - CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-"https://app.clear.ml"} - CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} - CLEARML_FILES_HOST: 
${CLEARML_FILES_HOST:-"https://files.clear.ml"} - CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} - CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-https://app.clear.ml} + CLEARML_API_HOST: ${CLEARML_API_HOST:-https://api.clear.ml} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-https://files.clear.ml} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY} CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} - CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} - CLEARML_TRITON_METRIC_FREQ: $CLEARML_TRITON_METRIC_FREQ:-1} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1.0} + CLEARML_TRITON_METRIC_FREQ: ${CLEARML_TRITON_METRIC_FREQ:-1.0} depends_on: - kafka networks: @@ -127,14 +127,14 @@ services: # ports: # - "9999:9999" environment: - CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-"https://app.clear.ml"} - CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} - CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} - CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} - CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-https://app.clear.ml} + CLEARML_API_HOST: ${CLEARML_API_HOST:-https://api.clear.ml} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-https://files.clear.ml} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY} CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} - CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} - CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} + CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-clearml-serving-kafka:9092} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1.0} depends_on: - kafka networks: diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 9a98e08..54f4f32 100644 --- a/docker/docker-compose.yml +++ 
b/docker/docker-compose.yml @@ -20,7 +20,7 @@ services: - KAFKA_BROKER_ID=1 - KAFKA_CFG_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://clearml-serving-kafka:9092 - - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 + - KAFKA_CFG_ZOOKEEPER_CONNECT=clearml-serving-zookeeper:2181 - ALLOW_PLAINTEXT_LISTENER=yes - KAFKA_CREATE_TOPICS="topic_test:1:1" depends_on: @@ -82,18 +82,18 @@ services: - "8080:8080" environment: CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-https://app.clear.ml} - CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} - CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} - CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} - CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_API_HOST: ${CLEARML_API_HOST:-https://api.clear.ml} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-https://files.clear.ml} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY} CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} CLEARML_SERVING_PORT: ${CLEARML_SERVING_PORT:-8080} - CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} - CLEARML_DEFAULT_BASE_SERVE_URL: ${CLEARML_DEFAULT_BASE_SERVE_URL:-"http://127.0.0.1:8080/serve"} - CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} - CLEARML_DEFAULT_TRITON_GRPC_ADDR: ${CLEARML_DEFAULT_TRITON_GRPC_ADDR} - CLEARML_USE_GUNICORN: ${CLEARML_USE_GUNICORN} - CLEARML_SERVING_NUM_PROCESS: ${CLEARML_SERVING_NUM_PROCESS} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1.0} + CLEARML_DEFAULT_BASE_SERVE_URL: ${CLEARML_DEFAULT_BASE_SERVE_URL:-http://127.0.0.1:8080/serve} + CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-clearml-serving-kafka:9092} + CLEARML_DEFAULT_TRITON_GRPC_ADDR: ${CLEARML_DEFAULT_TRITON_GRPC_ADDR:-} + CLEARML_USE_GUNICORN: ${CLEARML_USE_GUNICORN:-} + CLEARML_SERVING_NUM_PROCESS: ${CLEARML_SERVING_NUM_PROCESS:-} depends_on: 
- kafka networks: @@ -106,14 +106,14 @@ services: # ports: # - "9999:9999" environment: - CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-"https://app.clear.ml"} - CLEARML_API_HOST: ${CLEARML_API_HOST:-"https://api.clear.ml"} - CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-"https://files.clear.ml"} - CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY:-} - CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY:-} + CLEARML_WEB_HOST: ${CLEARML_WEB_HOST:-https://app.clear.ml} + CLEARML_API_HOST: ${CLEARML_API_HOST:-https://api.clear.ml} + CLEARML_FILES_HOST: ${CLEARML_FILES_HOST:-https://files.clear.ml} + CLEARML_API_ACCESS_KEY: ${CLEARML_API_ACCESS_KEY} + CLEARML_API_SECRET_KEY: ${CLEARML_API_SECRET_KEY} CLEARML_SERVING_TASK_ID: ${CLEARML_SERVING_TASK_ID:-} - CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-"clearml-serving-kafka:9092"} - CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1} + CLEARML_DEFAULT_KAFKA_SERVE_URL: ${CLEARML_DEFAULT_KAFKA_SERVE_URL:-clearml-serving-kafka:9092} + CLEARML_SERVING_POLL_FREQ: ${CLEARML_SERVING_POLL_FREQ:-1.0} depends_on: - kafka networks: From eb373b25c822e48e08d1b5691b075a676331f73a Mon Sep 17 00:00:00 2001 From: allegroai Date: Mon, 21 Mar 2022 17:10:53 +0200 Subject: [PATCH 18/19] Documentation --- README.md | 6 ++++-- docs/grafana_screenshot.png | Bin 0 -> 102220 bytes 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 docs/grafana_screenshot.png diff --git a/README.md b/README.md index eeeb896..a6d1520 100644 --- a/README.md +++ b/README.md @@ -244,6 +244,8 @@ Example: ### Model monitoring and performance metrics +![Grafana Screenshot](docs/grafana_screenshot.png) + ClearML serving instances send serving statistics (count/latency) automatically to Prometheus and Grafana can be used to visualize and create live dashboards. 
@@ -271,10 +273,10 @@ Grafana model performance example: - login with: admin/admin - create a new dashboard - select Prometheus as data source -- Add a query: `100 * delta(test_model_sklearn:_latency_bucket[1m]) / delta(test_model_sklearn:_latency_sum[1m])` +- Add a query: `100 * increase(test_model_sklearn:_latency_bucket[1m]) / increase(test_model_sklearn:_latency_sum[1m])` - Change type to heatmap, and select on the right hand-side under "Data Format" select "Time series buckets" - You now have the latency distribution, over time. -- Repeat the same process for x0, the query would be `100 * delta(test_model_sklearn:x0_bucket[1m]) / delta(test_model_sklearn:x0_sum[1m])` +- Repeat the same process for x0, the query would be `100 * increase(test_model_sklearn:x0_bucket[1m]) / increase(test_model_sklearn:x0_sum[1m])` > **Notice**: If not specified all serving requests will be logged, to change the default configure "CLEARML_DEFAULT_METRIC_LOG_FREQ", for example CLEARML_DEFAULT_METRIC_LOG_FREQ=0.2 means only 20% of all requests will be logged. You can also specify per endpoint log frequency with the `clearml-serving` CLI. 
Check the CLI documentation with `cleamrl-serving metrics --help` diff --git a/docs/grafana_screenshot.png b/docs/grafana_screenshot.png new file mode 100644 index 0000000000000000000000000000000000000000..37ef655a7a4f982138f5e32a08f262768dbdbfe4 GIT binary patch literal 102220 zcmeFZXEfBEa*>dQq0_jZ;3Qf<*5OKBe*cw8Oi1cC!hdiYE^6h_y-u%OW953Y7tNSGXJz{Os3EKLr zi(geqgFs8@gt%fRHpc%R{$jFZf&X(R`N^MH<^MZFmi2%AjB_`3n*k>t^Vi1l`RQMu zDE@m9NE#lN%45w8cDGeK33*Sstolzk@o>L>WAr}<#lAoIem8)@cI7P}1e)fZQ$XX+ zftS$wKP%Dq!VKHs_@op#EBImBWF=$E!7^9qoPTZ8q-{4~y@4pzi<(NKH@MhWe#Bkf z)5pQVqOywD-Av})e~k@C%m!lb;_07!#~dz_zW!7K{M3`?<6__TQ%%M~!}@r{Dy>`r zeS3x`OkMci{?5*@A#1;A<$v!Q<8j@(3y&eaHYE6?yS^f>BNRzTJL3F<`WW?ggqEoB zjX`};pb8aL=SF`Y{$~#lM-(r zkj_4cqfyI3Tb?7W7&aViB`kD)0UHkQKJCayu77tlwvtLgrkm)_fR<1$mrSn7b64BrnRnk z{#iyuE0!LcyiZ34Mjqm0ENsbYH_s5WPX9IJMfB9MJQrETC}~x0OH`{6L!}EyJRqc+ zuXE(v1+9n0Tv+``TU!Tx`~U1Aql+=cG>&j@JNgMty{O{#dm&*@Gh4V9cz$T`wf=b( z>{1yLN|IJW+h4~gs&(hzd;4dJV=EiCMFPM|LzYG#b|#xEI%U0YW(Nh4f=62AVemV- zIAB-e*%UzENFyn3OE468!3dicQ#nB>=(#PCO$s? 
z*LeQ^Weo}^!kRvH4w&EhdFa8?DIi5oR6m;l+OElu*GJe{;K%qU@$q8wRYPx zpgwPzxqHp7JsA;}PeZ#L&m*c2-f{p?1+lxtv&zz-PO76_S=e4`Jz4W2-NIz^%3<$( z+k(_ni8{Bo3!8h6v#E&nWx%DjtgI*w{wXJvcXMm^{a%_ZZ@CYCD*ok~@xbcDYK9&9 zRJS#y?n|f0=R?L#>YL|kw>|_ZeDL*lE`Vv#s7}P)hsk{#S9ZR>=TaA(-SCZK;>{a~ zq>2228PM~_VM#X=cm|m)A%F}6BS%=FUg-AdS@R;ZU~`)uiA>*t57D^de8={JoBEf^ zlr#(7_zX+$OJ*t-5OAH5miwC7nYvxb3=d>~OAhKRx#c+1E-4EwF45w-H+a)bq#e^NlD4C2Fu6IgI)G|(Uacx{N-Qo z3YoWY)REt2m<}7ST{-T(&Rn07bB|zV=SLSqK0#F~?B~ffN({2TJSK^?HOT)xQf&>T z51;+?N}@MaT5q9I5dbD2uveH)rdLWi6Vb385deVcod!!VCoUNUP+V8X*x%PJwF0s| z!8&sZDsyldZO)5dDQl zxM^>cMj;v(U&wfW#(96fF4P*z4E^oQ%_O%l*hwrkqU+dFh=Z{|%%I@6IEhatprIrs zrZegp+DYKe#4KwBt@E;WWR~TF`_=;yDNr{1N5bsYFZ{gEEp^}eIyXQJxkKC9F01WH zKaN^aPd%=h?HJOAPBzt)51E*F8}aDQQ~@Z$!tfPT6go_5A5m&$VlK?>`>v9_yxJC5 za*4qNBcqElZyLu#8c|v)L;pV;UK<39#+l1RZ;?EXJ?m1VjZ#g?dvhD zh+k+ydfVpWoQZKL_2_=V^o5g~1n+zEvuhom0@Vw#jxfF(w(R0vKM@fT@qj}u1R9u_ zW3UF&FrdBOGza+zB{IphkVXnvpO1<)`wr5Hi@0!{`1r{vuvl4L?QaW|ex=W~5=G~TWrUtDs{$N4Xq4)x zcn%7l?2pO)2X$FVk+|4}2{Kw%!sJ!KD1xmRi~-NYtPUj$WOQ|ejJ35r4IyN;;-?dI z^UQsSO@ZsL22~+Dk5CYI3y|;u#HPDMl#eE2mRK+SFiSe*_8_QD#yl}St_yAM`aK#D zi#evkO`F|*{@tiR9zMnndh;{=KIEA$mwzP9-*LabPS0!dnsiD9`K`r3-QtV#p>E-! zq0ke;!6ad2$EC)2s9R9G7gI`|r9l49`@|kmWk(vSb^b+=^AHM{l5sd4C2^I`(7BfZ z_|-oG03KA#M2jgyhWNyk&k=5bRQRS>l*`f+7{3H>Wfk;9qNz61v}q)51qpFpaD}Ev zPb4P6(0aoaVvf?p#RrGzdaXh+cC)@@p~CCKE$YT{!ZlXv;^Bhl3`N8xLW_7sZ9Iw=OWE=m&xPtUQq-Vn z(G)N1IR+XZoq8}*&k$@~yC0V|`0o(RoPi(yTK{PfNl@m~tMGl=wG_+sv*mC4&XN`! z9%g??SJ;a$V5~Fn3^z*JS3b?3z5)5Gkz8eui<_A;~nqec(z>!5OR!6kF^qIiFV^l=z_ggh?pCQ=2g2V^id zzb@Bn-8Rcf*qmb{k&?Q7Kzv{CN?lW+9!9@T=4bswA zJMK-Cw~6>&V`_-Y7wzP5Str4Kn2V zkkKsMZ(LQ8U6>2kh_qQv^!t%fM8SPMC}Jm~EzDE;o{j$61^as3lKaMo>yj`sOCj=F zskO{VQY!~?dP3`h??i|NZIkmlK9hFpiq$j;ImrGA&D`h4vOJ(r1 zZ?R2Nt%dSb{&^?Ee*YmC*V@}0S;ODrO2hQV<4v7L@Xm#EZEP7I*wm#$Ub8b_FAFgY zp&jL*I<|3j>??%C*kPuTUI7W(v)Uiw|(LA|L` z_Z{JrJ0b3ls^w+)wL_izI#&|QN-6GVJUqz-P=uuH;%w3sp{Q*j`E7mCZMMkiil==Vu>KV?G^s51$kLvOuJr=u0b5j$9! 
z=FaTTus35#^|VZjr3Hx9g~E`rLJK$XVNzvTt%3IQ)hw7~>ifvyT&=PX1_(2RE_BT{ z3pc-p)c&-eKf|PAfXTr_icYjIfh?>1dog#=iF>A%=xsdPPb} z7ZFw~a@}mefe)ywLv(wq6&!kc313`HiMKAAtbItm<5(3`gCE%i=e&2~ow9l>L}tVTR2vR zf9rjE&3TDsCHo}kG!RpT6UY-F?qQplM%&O)3svI3N{Z^8%O9gPkyOKWV~ZIl<$r)Xo`S zX{w_`|Md`(aTP9|txCbmv4a$1uS-1DxH|Urs|0IL{_WRp(Q=+8GJXa?G`LuSV<0ln z=HRCNf|9h~*%syQC_pXa#^|y=IMLS{&9l6pf1zk(Gc-zZOYAcsE69F(UpiyT%}OZ3 zKX{^@e`~pmnJhgpPt?!L0nO)D2VoHpbn{M_@ zL2$i{c(Fv}VDr1mLV>5pJvZW)vZUf5->+roiQir~)tX&^JLIoiaoCbTW)L3ThJiA6 zY-}6ksI=+P$q8e#`QE2Yb+KIG`j3kodlDMCm-F+Z(fcRPO{x2 zAe_E##D2w(f8+yhDi<8FldcnZsRjj~YzCN?Fa!^#k>D#c>wQ}V9P!?b6 zkf&m+Z3p6LpAcPVxdTsvoR7TlIG;Q?&rC^Bu&}^uZ?}Y;RyZz$c)8RB@tT_@ctl;~ zmD%LfC!FZ?>3zR&3J@U{38IgU7g+ZLj4w^-{SN6aqZ`AC1`W(VYm6N;+ssTI9|s*) zo_!_cULn0^%#7TVRhmvznrj4!x4pPfx#1}mK{?;9_a}ijUput3@*Qr~B_H*bEG+xq zi5?dA6fL(?G@fN6Mvm-)A3!4}_9rumK>1o%V>!*$NAkwyPP|e#C=!O{#mqD@-#6$= zH_n?}dV-QhkGvj)Is2kcRGs?)zV^k~w42W%edCfpaRnwiLq)!~+HF=u66*5pKr6_y z43?cTchna~57hsY?b|P+O zdeJsa_#Hqm+TTa)&sG0;!%;{=dLSd=ND;9G<$_6)`+19|hp~;bb>A5Lt|;d{#Up(9 z@D=?QH)8b8V^ZRk^>sDzx`T1^iVIH)G@Yv{vBgNL0e5pwrW7VCd_+DK7q)4q)iN>i z@P_kp=W5Wv0=E}s)X-{KvlWvb_chZnfb#((qvp5vVU@w%qiIg2tM^}ni0lD5P|m)( z1bYcgHC4`GJgA)`iJ=Ex2pmk15xD7;hzuo*WEe|_8pb|#`gna{WUT*(k^s4;;uZ;n zVk$c)ZUojxx*WDeHwyEF)KoB)z;omF~#f?AIED1P+FAfI; zws3lw`Sq+d46o;% zg{uAs%6;|=cviQo7GEWuQ0-yBa&gd;;0FhCNfl0uhWR)hGbmexFkDY>4cpPJdw4iF zJ`BI#gFnmTKC8DgU3uQl%Gn_=Jrmmy(UgZRa-x{%^l2dJ8qB1Gf!8X z$cfrMcsiIBr*>%G&yS9kco7weZ&zJlh%ZH&kJDD0f#$Y#H$I|xnj{6jiZjG(eGl%l zN^LB*)GaAJv0GO#K(iV>|~_m$O`*9T@McMe85i{s0?dfj|bBZ+Iih86vmv_*}YLFdWdJ_01DB*&la8 z`7|ZtwN+R1?|v3K_7rmMXW3i3O;9Rhzq3+Vu2YimN(xCa#7e&=Qf}}j2!?T%?v}*x z5b3+}Dk^JGh@{cX=xX<%fwI17qhZC0qe4GxKFyW%m$XFd*4IS>`JaBIU>N=2c3k$? zDy>;=N^N&$H)jy7rl2h0-VM`{gaI!u$xzO>h&PYGFC3H%yx_xbR%Ywaj~dPFxdi)13QP>QyDp!Dz%m`WwVdBW65_OHkY&Y# zKY}@=7|K^ZRrW=+R_}`acylN3F~N-j+>uY)pH?X{mBh})==7oGsD5CsibT>SJ!OQ! 
z1kq7@n_;&mv>Dy}X;9?YrGmO`Wmkt=sIWaVW`&xPHnf1oj~F1A(40N-w2g@Qw()|B zl?cle032_cvXQ$LJN&6>M)1X^(eza-T8W|Z zMiR}9G$b&Z&uPcXB7=&7FR@0P)-K!-La%$*n1k9>rzu|NKy>`6Bk3~dMeqgzrNFUhjHQ z&y1C`M(Wwq&Nb$~`&oaqXN~B(wa$_5%;V>8Hx@H0mFs;2;HlSrKD?_RI_r69*Uv}I zO{HjHdtd~vKV}Q>eh<8BgrOmuPq*%ZyuF7+{MRuRk~wCkyL~w#VsQFm?6O!4sa*vgp`<|m?ref@QS69&2;E;6~MZm0Gg9-ALt|EUab3Uyp zWJzC(9oKquXA>ULkeUQ_v-JqUFkG`x>&CHRl;@5%)d)oT+-4-H%q!}rI}nHhCW*{y zt3YvZa5D0IQk7Mpq#kr0sq`P}VxcD|6YDBazRFz@E+I`#*p4jBexv~=3_E6bfV~Vo z9##biv(8#K4yW#jOTpJg8mCy|YxMR|cX z(c*RWVusQAnMfqmBMJ$1j+$C^rAun_KE?dJmoVhE)IKylZ<@%d6*P1bk700>U??8S zj7&|+bXEJ5%sq6y8OZIXiF+=?aQ*-$ETx(UVxM+rXW`4ygMrtZGc({CZX@P)Mz{<5 zw3M8TyxgW!ZXtwJBjs70DLk&C7+=SfY9kn)r-*mjUfzk+x0wJwoP=!EtcG6(GbLj9 z9oU!U0>c^CGsVSdh!>ZD?h&F?#i-@!C$+)p_o5E#moDA2oQ`x2Fafn&_Xc&gOT%Is zy40BmTL_NE?ZysdP*sCiPha|JY4NPjWwvHnNRTem@%CuF+jQ@NoYKRqwv3bg0mR%} z&%r4Sw6!9`f&cb0%v*f(K#H9XN4Y)j^pr2tqXqp57$WzFLu7|{nZ|?k_5g53x1<7`q;wYjchn~;iiMRX-5NRGhCgI z#>p%{P9x;fCuLC7U&=MpKVYLB!1LP@0QcRukd0B@WnjC24-zL=Y`si^^GJs7+tCmO z;f6Qe(L8-N*7C0wfS7pUVGL-?>{wq8FC~TOA+xIZBB2rSuiJ12p2nKUjQ19Nh(Z15 zxtlSEQy?G1)-(d}p<3%PTXfu;6$(So3=$>?ut-?AzMt&=O^@SjSF4{7rO28A`r4NC z(T65a;99gK`U|E^;r6@SSTg{(w<~|!r5vs>mD8%Z1mK4KWe>YpF*MuSt?m$cb4wA9 zkzksLaZQzE#ftXSAYEyhYh$ zTE5>%dl$~Hyo_ws!<)N!Q@u^)i0a>gO@B#Oa{ebT=o>GuKJlNJk2Mdx@UheTf?+pa!XLUl!?Y$!x65Z>Me9$dc}NG&7qdw|yb8jkr@utP9racyFP=>0 zmi>B9j->(-e3WLy$iKaRlN5L(2-*g$w(03a1?F2Mvne0d{fd?Xeb^-o3S|JM_e4q$ zv<#tU<>%^>9?Frv?Rd+>Wgw$n*ju|#>q3Xx&lLZ{!$%B49xx1$VtZ`F*t@UvU4&7D z-Y^s)U%8N{SS{Sri%yHS$5F^4M;as6v?_{x4DFJ(Q%1F_2o`mkDlG>-Jw71Q4-X;* z`?XDg4m^v8TNai#!^bzZK*uY@g7zvVp375TanqkNPIQ%WO~vo_vA7P4e!6mh&RZ?1 zkaaWT0b!K9*29fekd49#O2Zw*CDum`%*Fb}kUhP%Fd4a(k6zZ1tB^ZEH43}eLu$1B2wZNU0fd1 zy*27t&u3^1@zZL+ZUX{uFmWPNV7j8)E=%*p+GR(*?|m)kHa2oSrz%-oSB#jWda|nTIh&uJH9sisN@xtM;hB3p(1H}T99ruhrMcEPEGepoyp#6=p$EvW?#aXHTwtTz%x{%qvWwU#D5Q^ObevHJ8j zdJ!EvofbpBswafR@=urn1OX_G36*9Z0}yhc*j@L%b^AWcE0ACAmYHCEDkPl~{rHDr zH>QXC))&3A?2Cy~e^{iMikxoZ7@1BULzUhrI`e(C9EhS}gN?{4hc2{gTDiM@^+iQ^ 
zvY`EA%mkmuzA!!T!r8zVK{aXc7fgRCVS1c6jt!Ar()fRxBdboMFT`-_KNq6OhZ|~L zCcjsCU)CyH8(PEsIc#2vYbC=rHhz6M_Ar^R<+wXV!1%o0I@g*uc>IkK29yWqprX%= z*)WBs^*nR-khH%?IANuxpX~v zeO>f%ncHBzCIypJ{~iS^3@*%otkxX4d?cz`XWu2t-6a6KF=V)y+z*i2f&q0#@Ya#6xXtFE87 zBmysb=>z^WvXm)vg49}AslD13rYUY%Tp;`W{NZ6l=$!}(G3ka1dVQ0(72RCMGT*PW z!=-N9>?D)xqS3p+2^SvqdU2P!GNxy}p}#>wD?m@ey=ZbghAN>dW=Wdc(rc@O)UK^z z*j`0OZ|!oT>%yaVkT)OIxhfRiufLqbqhwQ`ikcf+@S0v1p8>PJ$fpMsXb@`Bnps+? zWdZIf4cG6SS=o?fR$5RKHiwrA&^<3hMP{IkR1JF8l&O++qgR$*E<-7vuDe&$2*jNw z($;dKf|)$TQVR0>I)n3KNz}P?MoR=U4x54!O9K#ZVJaB#G$CHcLg)&T6XQq$(N{`o zqM;S0y(Ackir>}Q${~f*XD(LE$A~H2GG6z&=U92lXoS~J)NR-P<_&T3@2IhdbiB1~eQ`)>?-_7a|n<^*55L3U7hd1vfde@NSfwS+ES@?0zLm@ zjBGd9Qy7H;Mrn|Z$r$MCZ$DyMwU|z2CPts|&OL%t<-M|4a5TXE@&vAFy^BlHX!xf` zX*zZC<)^perdP$V&9L!6HDosNk?L9&&U1Rv4+}+g+lO7!xKIx)OjhzrMYoattnlOm z_?JWgaRz_D-p=UQ>bk7KWg+#c&D?#$%8!t`rxWgeA0(IHxzbAD7xlXX_X8rOmv0#y z1w2D79jN>LnE|Hrc^uF4=A6q&x=vTKE}CG@8?b)%Il~JhbE$!7QmLnFB`n*QLlpe* zOB!cXOpFh(Z?h}Lqk--+GcD+3Sran_QYIQETOHi(KkE4gK^gAuag%rY10w1{a6hYUPn^RDIIW(8H~fG?{hTXWgBgmS(@8^%a`A^_9r2FA}?7RV>Ox;USnZwF+587nt+^MH!MS#6G$yJkHS4aGZ zybJeP&`3lDgojkq3YvL3&jsi@Y+|^QHV6bmIs_j6Vqyhs^At+vrvFkaT;R1fH)7-o~@}#Wj zXiRk=bb0byI9Tz_NOF3cLv!E-Ar@F@`K?Q8;DWDKb9>~0zo_B$DdBO-smZG)IMErs zAYyCCW?$@6gU_wx$tWdn6>s=0=YyLcfE3<}{BoPMpok^mug|WV#!8imJJ^ks#d_LH zmXYgg+P-zi#JunCiX(|e);D?ND7k%wTNoRmU_vvxWk{k37vE``p>T|Tps(=wx2uhh zHx2&J0SmO!O-otHuBt9(0QSsQzAzM`mrHoVWvO)seYM;m?g@FysOypEF=hPNq!)R4 zu5l^F^YyY+0KxOg=H<$9dTXTPzU+rHomPYRffs4!FKwBilKOA+DK4G#Wd=K)^0mP_VmC>d{D|iifqx) zQho8k5BD?jR^Lk!B4Qc^FirOMdeg#$!Wn$rCM%BNv6?e;6FXhFA{zIDt24_f~`QL{k~_xRx*l zh~)b%ob$0d`{xMvEpY=dlGa60cedg`$&4a<{t>H9% z@oC3D6)~kV*Xn9X;Fs5XlORjLW+7RGylBr%#rpPG71s^$&czizx8oA~Wb)r8)&{$X zNG^`0rs-1U_qoLKU`!5(p{qGm*laG`@-UcLJe96U_L?Jgy4*hP%$yfZf&&%9=AM=^))eVNL;1{qf&*ZKYjW%a2_0_$&%?` zr+Gt6i>}feWDo;p>kT^2QUjEKNFO82KAYUgt_hYhVR~>KBCZ_W?zM3eapBmB2`}#1 z)ogK_Zn-t~xM7;n>E)wN`R9AtjVawG1wV4M@Ps&jTc!?^ha`F$6Sv*D@YL+JtJRwn zt-j-QccSGOnUTCmJ5g#4gXAO$9_{^AKPSN%DKPRmgrjm-XD4!Bak5oH{NzaATt}7@ 
zr_PD~(hggn6l6M)#@4S9)|b%ShA)(5+Dr2-Qr~T4#R0P;@7#BXs;%S0gE}?UFuH(0 zi5(^kRXHxxo0w7{4i-{GlmEuk&+n8CrFePAIqUWMWjm^?)qdsWJ*M<*wEz8ktlX+^ zH9O>;<#Lmjxpgn=9x$W7w-^7gBVajL{TIwpd;aIE)LXuD^NDX)-v@7pmY9=@_PIXa zu*KDd&srE6eRxxL*!FYnr{kY6#==5U=bRDZM@|y^9(Y)=KY#Ei^aAfFrCfUD7O*^7 zEW1WyW6+1I)sTgm-b{Ky6r+N9`z7r$DKtY>>gwnC1W(q}BQdA8lDku`^oEi1_3&(i z(WWOABSqv`t?5#U@2Rl!C%BhS&HB~VNoRxdOY1GYClvGt6Zxj`#xYYlnuD{WUU}l;u z^fIZ{Dh)&HxVrQW4752^8MLnINi~e8ZlZ@3ptAFnQKd-T`-u}O}*#G~`f2~mLs}30%C!j?YRaFzN zre@Ux>A%}kSllemHnmS%tWg9?`p<@y2EvYAKVn^o4_WX{ii?-e+G7L(1+V{p`fEVy z7R%gwW&-(-o){zH_TiLbAiU$(`o*KpsA|4%a8T3uFC8tPkeHMdJ?zlA?=bf7(ef|+ zF-PMDKsqNUBPMOS9qJ0W|2eN>)x5n{$rLX!tQhv!k3x0x5V;DrIEFbUeL3pbpr8qwygy=g`9*$fGdvTrSG`b~?|THA{9#`ez8;fPiX)cjzF7k-0*2lNZ9DM06f~>a*5(u? zUr$mfU3ovKqHui9SI)+i=^q+8kJ6j8R0=(P!rHI!Jaee)V>@ifrEI-&U!FdNB@WYL zdnQrG-r$rqyxrg{1b6@lP3Iave99#e{A=U6ip7?G8Nu+g3Z!7`Q&_0vw{wju?8St17c-n^QE#H1m(!18f-(sBh`=N&u% z^{MZVP<#M$eMscSS1pY^8Qyv|JZNu(@!6P>Y^zYLZc*>?fWBkJGX|%FA-?5i`B0pA z&;5cjh3cRJSsJ_uT7|>2XA>^{Tia3QLm=@mb9`4U95|s5&u(ef!e}!m=uC}v4#DQy znmqKNPAuhOi*7>b&dFA;V^amZW&Q%3yR+#ZKT|_Q=g<wAqw=7*CL$ zS32zTh%N?FT9bAZ9`O=^OFAiy&uyY5#VBo~#vv8lLepxxo8&s{F`#kLtQI|zLvQJ+ zMA;g$zOPjQ636IN6SM}sG6!EVTb?s}m=QDnRC$DXV8ZwC>ihKNMir+A1&}WLcgvvE*1+10NgylI6Y#v6$e+DB3Djy#V87v^6_%&lnk#K(YZ( zNMZ}!O^a6F6XxK^$x>4Vybm&NJ{mCXx}aa1o;Bq4*>tu6&<%&1dVhdgwXvBzG)D2C!S@d?HUhW{}ipa|dy|Xayx{SR2 zyh-b ze1vPZ5M2nkgHLE$N?(Cna5e9leg6V?rWbg>!=Q|miGfN^BUu9|C z)!I_7zho(7Z?0b8;pJ6s)hV0N*piY^Mcn7sa8!NwjR4h@v}5*H<519XR&p6>+VjE0 z;brQVp0^i~V-uAQCOelCpIk>9gCkZpDH!r}Gr$+u^Zi3wps)Tge6P*iCMyd(ZYVlR z^THDBR7j^Y(2N=sCilcymF%gMO8Okro+CQB7*PDMGMs)Q;?6ran>K{I)G>4`XNIfV zUhNT1CNS`sy{(JMB5iBrqVnmts0mKQiWkNeSWt!Z!=`28y#ad%cKR1D3^`_l8&2uO zuMV56y_@YIDHNRN9rmbGGp*_{`*6z&D_!!Nk`UglvniD=62X;OAU~JnLfWHF6%*Ky zxxEEjc4`yI=VBiz&Q<5KIM@L&Tq{ye5#I0B7C_GwySlt7r194F0Z6qM)6QSTSr@a3{|^bDPuaM z?pLi{w$VgSyBecVhRV;*0$50-4Ob`e8ISnWIn!)n0WO2?P1=n{^fWied0xvb^-a)V z7>UC?Gd6}9)bN&fQp1Q1-77E=GJgUG697u>WdWr{`bnZ*czT6GxzqY}kF4Zb;B}Mf 
z)w)*xgZBLH^Su#uw}Z`{f-9DBMgHwlb7y&`!-QHs?s*~WsunFMap>6=4uQzNL})(a zzyj^-Qx)kQ8Y|O5%Q9}~9m9Y?0jfK`PUn#)R>)^G=R$eL?o1*yiAM+7TA%2^<;XrD zgs z2%WXvv(CVv2`B7rgg)iOubhT6(mj>_MiFYPnkn zIJuk$KLQ*_cY=G)@1VGDgGY36ZfaD|V$Ei-l}qf;0=+{3*g(M5^sN_P1)$#hGjmqq zd?fjJ*RLgw)LwLaK_EJVO=ElEA5$)TXc2*Ls_o(Qv{MwoWYzxUru^L_(jDok9<3=0 z_cc)V@1*u{q9DBSz8A4xjuO2a84Yc|Yb6ZGt+qQ0bH;*YCHHdy=dUAJOj9n7J*O;0 zR+{K2rM!Ym&3!rN%NvWe&!7GD&@U8e+x{5+Z9aq)ZUdW{OW3b@!-wTVJ-E!!c`=!S zI}g9v^{Q(TX%K2j)#_Uk1|==sgpm?C&Hm^Nb$pnGhebXII-ITPfv~^2zAOJaNjI!C zc5o}Y2Qjj?KEp$jbVF2jVk;+h+piQk?=j_?S0LL@$06NtYfxY8&XL)AAv!otM=2gn zatO) z=0s|tjdiCDC$}pd!E58r5A_VFt2_j{zD(GP?PdI&%G6<>ZZC|hWdveP#LdBSP@+mn zKbu4}>2KPPI}*I`>%eYiJYAP=MYtilo^ZZT+*Pb}b%D43#iYu0hbgXtDBvlqYMvry@^ z;n@DT^nWBz?3j*8;}+(TXYD_QZLDpxeil=-(!SQ(Drc}V-%Y2_d$sa3YSM)n(wzQK zu_c2xx))%pqA?m_-1EoYypV*qXu26wXxV+$dX^E+~+($+S_3>o5ydx-@e6GRI6w%cK`UR(PN*` zM^{fXaZ;Z`U#zmRvDRH%6IZ5?@g=$5uVCiSu`2S~~no(?$yIY&BgLp7+PACyKs?y;HZ@Y2I`Zx$3X1!?JTIQ=!XvFySzoss{n#OUyNU zN4_d;`9!Cp5--4_UqE7k(kPv}Y?Qzub}ZR#3GAd#qJ#5)ON7FqwzmzA%al;VvW~-> zTz}tYgQb;~;oQPk)5Z>B8mm)oFOvNR^y>sY?T!8YGQN22j(A!6!`TL zJwP6QQc*#HDnP1UY9YH(tnD&HaPBqwvqcd3rR`^!jos@u{rpQj1@a<0DQ(N| z*Euma>@!96TT|Z;=}LPApf_pueH6H>_!&ivko0peToltfC40%1|28mwuD~x&`_=7c zibK?4aePkahAt{+Y8t*cqEcd@_VSjMrNA|mo7K_rrOjuGh1(hzg-RV_KhK+ZRhI3k zw8PPox`ixNX4MbiRvb6)^QvlKCv^$h8F!Ho>*vlP|5q$5CvgJL=^bR9lx8j0TT4Bu zcbXo>8t_7;&lfL(Q_Kb5-+{bRrsdJFZMg!EmMo%-8?&9?aouuR1uyugE@TM@k_XAi z)Fn_8GT@x_H*3s$8xtpb=Bi&I3k8=~q00p}V2``Lo3D-(GKlH@c4Fc`Pure;>3((S zmjYb0{OJK&jq%YBNlZW~{56jnUTyQ5s~X!eJ9|q0cP*C&#fz-qxGJ|iUX3QR9eW8Q zJ(7F!$G3;|L(k)%SIx4ct{>dRj)Oq=Q{qBtlM1el1zL;;-CFp}b8W8gpu|7F>W54v z#$5(QQ4zNuJVuS!(F&oOm$s&yjw);e4e(=8FfoPp?vE=yTSW~l)T_`4 zm8w|)GtiT$SB3Lg=mYwcx%5J)*#5QhkI#a8K!181123BA@f@E>d|v%#Zc;nD#ybg~ zKi=e%DU^;g)IGB^UC%o>_5`r`*U7Es8x-L)G;2!e5%7CWET5djG@b5dQ`hVO6FA|N zQ^H4yX1pmUuDw4&9pd)qX$hmx3?^rN=E`~0nrOu5K5LD6M*e#JI!?hyuJPtO$DGEF znwFxR429ziGSz*c$RWn(%43qYC!j^rzv#Q4Ma|)y%!ov^wx~ggl;mkP-nn9&cAU?( 
z9tTH)sDW3}i2Ic7a7S+8!%>3A=8(N7w?T}{=+wGI3NmD0_z2H?knK|mxx;B`->0%q`LXQK+wAKwSec~Z9;d`l zHmIF|oPTL6P!Nytsb0$@xeG@)Zy9$j0)90uf~9o{BzR@qHS)X4S426;U8}rYwh@)j zai6QLJDq(C4rc3UfCU?~?oRzbyuD{sQ)|}+3fMtFMNzsQDFOo0JE({ns`L&@Z=v@t z2&nYXd+#Om009Cjy%QkxBApO==z+VzbKdiQcYNcvA9oK3Bzbmu>Y8h=xz}dUWSjv` zv%@-z?Ia7Axf}6)6!|9T=TRhg&i+tv;$*$|fiXD$_Oqhh^VQ0&hT8Z%Zh85LvDn<6 z2@kfNZ{u25`9PXS^Mv0Kcs>%pZ-2ylSvWV%12R~`4kK|^NKf37+2OiA+xaOsSnPxy zOM4t(sk3Y54yjC&1hHDz5JcY#$ecC@@aZD)?i`6aSlTI1jJZo4B=R8=% zsp%CpC?e3j%6&uG7G|KR-g?8d-jJz+Tai*!1`AVZ*MgkazN5#x6f2a8R8_wna{z*x_jGAF;o_T89V@$4xABX;PIh;mnU zjiL!%)GS6O!E4UAVK>9~@j1^~)h4gO@|-I*SVZe>$70`MSa<@+ zWC>pa?CEP09nlYFpA_fcbnpf~f_s%Zlh4iaIkUwTY?iocKAaKNmYCUC+%lj9!dDQEK1_HItd(Z_f)x&hd8`ipn2|AAC`au~bn<&8&3FF{-d1sdljt5XMzRD=i>3 zawm(P90*obvx7`XS8|_K_#*93ELfBmiJuj9E!Lrp6Ulpyt2r0p>mosXY51{HqoGXX)n~dmVa*wYrp?Qb8u5rZ*S8Rp$E@wed zRnz(7{b*u%nyoJv6&_1&& zT^AxUzPD}fhC$1W^)~2vQy=@V^FRj={2MRO&~W-ndNqclU+vsQH`04=rIPAA>m6N< z_5f8ZiwVN~u(_>7*0MrB{R}(Vb$m8`cL~zwZ$6iAZOKH=% z?jOl(z}upCSq2YJAmJI_6+EBJR%-9+lKQp3#&(w1rXt9q78w}-E@8vNd2R32i);$u zy$>0&(#tCsUwk~ZJ!+1-_4MyTz@JKz`@kgnXG@Dt!@K>@8$7(?tJgmM`_7c%;-vp< zFIivVKl*69>nQ zNsLdMy?sR#Tu>`II{FRWzh}_=5cJT8j4W~p*2f?vkM3XCT{WW_f^J$z>6OavF7>I} z+TL~lx7K)?FN4T^02OD>@qWV_61W6JSz9q{a&rAj%<5!q;XN`)RLaOJ*rZ1w5K=G3 zQUUtKf6q1h(7cRbYjvt`XfalYEx&9lk<+@GkgytOSlc2^&casgcZ_X#^X(R>?zFsc z(FqE`S_>u1(7i;cBm%`8>H?Uze`9j~S`ugD0xj ziT~a2@S0{8-lTP3WnYNNS#QJyMrgre(kj;2Ls+MULDFuNW^H2m-u}ORfOkrOsKHB$ zi_5Cqr=bT?>>AUlZaZ_pCbENFD;PjJLeBHgKmPaOy=?JathZq`Su1HsG5*}NkBQ%< zzte+~g&hsT{w>$kw--dzq*Lb*O~1Hge7^3u?g`e^Jib*+c6|?_+$XxrUw-#LrJFeP z&GH}AYM1EMlLY>3rWO1m%~AXMp|(xBuFE#_&nocR!m6D>Qu9ey49CFN*c3CTBO>;i z4+k?R;lK6gt3LhIY{2e|SHg>;k}n47tjQ6gqIcx$9Q15q`!0ugp-~q(tX<$woii=U z2wt>ky&s$n+*IgDVQ#>Qc~^8XFu{=XVWQdm({7T5sG_0G>!bsj-qbljZj+?P1i zHAYIbveDLcA};5ZN`3u$sV=&JHqkP|_Rq9vyHXKBiM(fW7k1tGcHKLJx86m}sUF3! 
z84r1rB(E5HX5OlXjYc$nvg=z?PXTkt*-A(?>Jj8mEupuI}u<_h# zYo{YtcB(vRj=YktZiz?Zb=edB?~q7HbX?qh?IrXoa%k4#gCF!&A?h?wF1hYKguN~g z#%BZ`nhe%3-{Q^7$4UxpvQdA7(I*Z5mgqRJ&`tW)Z#dF7;f<2bmi0QV2b-fm^ zT_f;n5IB7>X1UK8>o`hNP*^BBTQ**&DVTisvRXcAPibP;hj&p8${H%^#8fC6huWtU zZI?B+5jku@;gu8>DZiTI%dXl?^YY{vV2Wg-wsR|PSa%A%Sdk!r=bj*O=6mvH09Y6u zuqr4PMxnP}-Y?5+ZNGmv_ZnaP^5u==>Yz&O-i&iGYbr-t+{&W|4=@mV6&00hsk<(d z-_6N}562JRU?yubJFvz^CY_6R=)DaSAg(ThQB*mp$UQQ$MY~!R z6-l};Z_UhBS=iad))6MAQ{7819M@e2@wwGze=Ru(-G3OH_TvYEFFr`o!6EoLd61N} zbm(z)XJ?o4hf>U!@0$^!()*PC=ZZ$%iN!A;+w?Dn8d4inX*!PQ=h9UjP*PCjx3@pT zj92-QsuJBKs(8l6CaVRrdh&>kUbHyV=>aCHZJzNF8yC4Oq-#z%o2EbSIen4jD~msR2)pph76suhv$Jll>p6 z*hZm6KCfv$=TMQH3Ep@aq17z8@p1B@joL9yztdxNdffYaIxy~EtV`kp$hU?;o?!@jp3gD?2CJ-fV`YI>{ zDR`17;G&?YS)^fqR%m^8GJMw>npLTeShL-DwU3d(u8O`~e6Xll@A6rMw(nCh|DCO_ z&E0F>+2;tt9oSkhx7*dtwl=0b*~C0!gmT>!d$9(*KvJKYE;Y@@skCi#hVp;=9w)_xt-IRFU?fS(9Vg+&WY!fQ7T4_BGnx`d0ow7 zz9XDiv39Kp5BNo3hh_MgBE&;1m4L}bx7v&>J1fi32t5AL=WI|ym+~Ei0)K9Jq`8RvJFO|!DY4}C4GJU`!qC#Y+8I3NG+dX^GFnWqz-Fw z>n+i?o9K8suRIouy_$I~E7wsOtW)f?x%AOp`fHH%hc_-OVF#7ua=A*pRl}X#lP_Or zex2Dw<`l{91gr>qSSYFJWPkt86&)K(7EB%<74f<~8z?mf+iuMkN{<=2%`}bcLBeshJt_ds^(%-&y0IL^#OfM%l zn>U56-g#}=hmz>WJ%o!xKw#IRlUZADYv}&zt{aPBt z=!if%6(^?{JAM<^Ns1C$(cS@I@u#+P4eWKQqU zw`cZIYiKm~IFW-NoB!+d&W1lhcv%26J&jOrU_vpdm!AaG} zIgcq1Bu41GK1CC5RkiWR0XkBoMTVn`pQ!)Mrx|2>7V%jbHdr#XL*%&=*pHI|c3Z;Xz@Iy1Vx?myE^i9%H4V&pY?tq2_?)Wo>3t5!|g!mvq<6+j$1- z1LVb3op8QSYe#(vnVYuT2NhX05ub`DtzhhkbruFj`qhh>Z3~L9+duL5o7(u`cHC%j zwjnA#`pJ1E24GqjblPxiMpKs{2mg53?R0^E@?rxzFh^(&lXvrF#A}c-i$QQ5%QL-m@}q{ zs?xZJ1w5nR(*BRxeJU+o@|=lZx@F!u-#S%#DBGSNXpw6{4d)iP0A9^nnwcN5FG9EW*vq%M}L<(ribW6mCg z7w!P{;9Riu{xL?vnfGMUkzdzqbpDTv6Z$SDM)#AEQFh~@T>&7rF6>RNV`gL1AqkX# zEZEg5e+Y+8_GlCoO$txhXNw&<{J5oem9GF;kFe`w(h_tuGMDbp3-(K&qiOc}JBt>; zWw~JB0PeiyNh}*XyHMIq4-7B=8cyNdc=O9>GHj;M;`(%&e6ClK4&3P;K(`Hpa}Yo&0T(Y(wOCll6-xdQP)HC*m7XL_lz`l zU5R7uY-YNou1@F=MiyY_yvB2Jl&0|iLfwbz|Ao5u9seil-iWfwWqI=CRiqe2E^r)A zTo2FikSZEjitl+`9?3T7f~o?xfPVq49$iRRBt3RxSP$?-l#PsxDxg%Hrz#-E#RWr) 
z$wSi0LeqDVT>uvTAsvRb)M1VF=7VV z=y`pm!YndT$7?~@+RR=z6sUz&%?9}en#6gvU%7GL^}tSmx}Z?preJw|TvL!{uBSvC zke}u?pa7$u)Vq~g7ee-J*gX2@9DNE~`<%lw5fMdW`$x^D_<*s)zqVi(k;yg~2-uv} z1@0OBROdh?xj;XcY8#oSfBm(99l!ZDXfJ(kr)Fx$rT%KBWMg1Jmwwe&y!Dg2cZ+x3 z8(IuR*9FljZya-#xpt707d)JyyEWubay#57+YU(qsIntT7gES34=xa1zhL z%Ao|0QfAXD%Nkn*?u{U;$F*ufO$WaK>15H&nlLZfNWTM{<aE+SVm{j;j2)#Ap+7f4xb7VeQn0@L_D6`Ax4&6mo` zT7uLhz>n_$!5@^jxO)V^0T2ij-M1`mMR?Kn{! z>7^_H5KqWiX#d)*6Gjn0u@bs?z5l^>oCQ2S_&APXe^HJRA`UUh%jIF#+ps$^U##Iq zUVVmk&_fP^bzj}bv#~SFkz2IUC~SK@B9umcq}+~-0C z;$>PzDn>B*%6@%pP*f(3ztIlg;q5WGFxcDafYyp*%t&>)`g(w+i62aEBY-kH*D-1W zqb6I`6^_|E(zbIgpa-c+*I23=q_iv?@a9b`<tK2!XIjRCC|iD(qEZ0dXZ3 z+l?FCp5PxIjs*r2-~qion)n_vA*U8k@VQmr{L9ln+^N7bI$H9LH z3p|S5Wzz-mclnKtx=_TCT}7AU`^~(#ICXIE1Ymn}60K@vX`$}~Yc-mUE?9(~_}${_ z&1cH0`;3_B1})gMN3K}15Xkf7_6Oy}XT|DeQE7C(NZMi|I4P&)33;)*4)K{#yzHj{ ztL5)Sq{<3Y0#;(s3BPiU5LX))@U$|9#X=c=bJ{d%cj7x-i!NJZ04YXsYb+-pYt5fc>0mSHJM^om>PU6U&-4mxdCBwK zo#_6Gd_~m6TG3|atP)DUS`=v&i=+j&J6Vk$M_0#sae##8v#9PPb26Vy#KR04?VP)# z>8rvvix2r)*U`>VC47XN@?n6f)|=^WGs{H?Cg0y4XO{LhRyoZ;Mi`Zr+YyD0O8*dh(58` z;l`|IYM7qY#VUW8KR9@A(W+NL3a$(uttlB>1klU|V$)g5ES3eBY3oU{ZfE{XDr&Gz zAq0w~{*}c@(cnIf|IU%e@x)B(aQ~Aq^fB6MFf>_s@3(YcL3K$?+9iy7n+c@w#Gv2x&8#CI3pIjvP5OAWNDdH zZePx`lgzKIJS5OFd>EFq)&&()>PvaZ49o~FZvGWa1oQ@!e*oa0>dvQ|*ZSw^YbV)d z&m1z0`{L8L>0}4Uygg;KV2dC~MrI~e5Xn2l;27_H{^uoYdnux5j%~& zKTtqLVPFqiLGT$5rGH_SZPcZtz5}Qy|K;QUbuNJ0;W1qCyP?x}fwP)$r>Ed!fSSAW z@uK}+7QVtC^&uqO#g5LC78VppT{+gv`4lEzctT5^g_$7Yb(&mBmBGgq(k`#0z&B7t zPg6#!!CHB;Mv0C?IAAY@})1{D+` z`RyC1xxxpLA|^}h-g~Iiu=LJxZAfzWVehcyL+nUSQOE_#xOA*0CtHac`g-EBnF2)% zZOO#KXExOU*Ku#GvLn#aTVEeSOL_WrnvS=8OmBH}TBiVkIL;s>+I>Jpm3w33BpI-d zdts>ak%b-ALRo;+VY@f8Qq2hfl5%yBH;FRKA zUH25i8UV~rj;P6(%QnzQUI>#Vxp%nkd7^>%Q|n& z@Ahc)(a$jOy*67=P%vvx8>XcI&Y;wzO3h0aPHgmX0+xGic97D7*ztQ)aXr5_p)J~O%F;|b*7 zpi{&%GRfO|UYvjX>R!Cf0WWuBkwN6Q2e84E4K(h{UaDI&MY1 zf#ARs^7lg2oV0ealKU+!-w+2h!sT8oJ~V@VhtF zx8zv08sU3sCH$u|3iX9u-n{bq(_cN(xk__@#Nk`0!cr^tNqmf#wXT$+4md;` zi)FIXZT_VCg3XcD#h-QtF;4l_ 
zbwY-&W7Xhb4`Ny)EA@xv^_63ofE5nn$XH(9C-GIEtaYyw;E!2WhkGL~+*v%l@Mm~b zq1+-se@hEH0T!}X9GVM>oVb<>Eh_r(`nBSgPYr#~p;H~L>!E^?i3zi_eKjyV*9PX3 z{8B!{Q5llEyelK{VSwdj)8r9%A)cL_v;--(o(fVP^(g@4Ckk!dIJxe2;a0}Jtfr^mL# z2Ri|?v!C*=5_q{zri5Eg%uT8p9n?$_Hrvi$DUn#mx=Gv%!jZKMVWt6rltAG8v^U%?qhxs6nwlAtJ%WIV zEg+^!6z^J`v}gaIsd)qNp!#L`C&@idNyA7~0n+||P#MutQAO`)y_A7G$pr-^gQVg` zqr23XmwfT!lNv-&ir*bCtYp8`dwcr)V>kR5_-#HhIA9HSYtR)v_(3MF(e^`fx6)>y zo)P7}EXq%7A02g{m6dfj4M}vi>3*p9H~HXqVA6a1mfOVh zXYY;woKQ17{?6=22m~@*VZzaqBJB6^KVRWpZ=+~39r<60fy+Wov(NA{H2@-D09X!g z{u#R$U)UrG7%RPWUY(Q`^_~j)`fmugNx`nW7q4+YfrWut^H=<(zm=kT$+D; z81_2G>4w6;m*A|1w<-?PqmG^YZk;LC+?i!^{HdRo`=}Iw92! zLQ@O)EQP01Jdak*^0+O9ab7cxVp7-78>vk#Egst(&ctR;MmLKc#Nlvz@JAjv8K6IO z>kZTG>?#al{G#e(?n~CK^Bq zCVI4^fCYUuy*X?Jr||T_O&)JN`~RKQ*m0SZWv9D)*L7&xqK5mmoXMfW)Z4CY=87$) zpVhtQkwqgn$7o2vllqfRo%I~Kc21r~E26Y_?=lA>)4?q>9g7A04#Es(CX05MB5kV` zC!u7y;Sp=qS963v+bUWsB;)?%sWpI0fzQX$9v_-DQMR8o1SLTPn?uR-CRGp!@?s zb8~m?gpvtK!#+H|GcLVfqs@NgLEP4&UMC5~*Vfi{STU0N{%r4(2_upIUy;g+1+WPI zH&XFq7xr#Wn#EiM#v>U$O00C`2Y^t7S9#H}xyrJfPUyIMNN}*Sp?aCgFQwNxkjEwQlIh*!58z2n!v0^MbDe8G@ zpZp8W_fk>?9%ARVN&+wH78dTeym<$pwt$b7J^&o^*LY6o{1(_@7KrLmQOz9h(8IQO zM{1E}xaNWmLq)h#K0paQ;lNv7wpMYKJhClAJW{!==xsKG>Hi96HNPTq;~Xe+Gy8>3 z;I#;7OaM|jgPAU)TOF2sfc_Skr3qDqq2njP-@Yk0I22V|j#(ah>hs1slz$F-o`}}r z(kR|>)8SHCrh+9)Ruwq%k`u_G6P^e0b_32b$8$AFqvU$sFquX>|9H;;%jJ^aln-Ku zr658%Z;st%dGvn?J(3KfJPp)V+QgVzcuVbvR^Np{>%TJxQ5wLT{tf>TCC z>ZJp$2?Ns`RvDE5%iO+YR6tTI^QWK5e2!wwy``SWV_CQ%OnAX=xP9n@{-~4jA|_@6 zIU1NUz&22-^wOI7xte3hJAk3^k}vzw-I}CXYoiw3$wGOO894z{F{~K|-1cdmTpc)C z83;((6u$dpY;e|JSYNJtO_=H=b3r}#oTh=(xE{HAI&oZcmbRE{XP)Dq7lyNDw3iYZj}`t?w>{eL{JrVc_GhBbt6xDQIITm z+Q#F!DtqiW7VvZL0`h2pqr>1ME$0Cfb!mTq?mD9mIbD7RxKL;;zn+y-Z8dmeE&DFi ztfO4=*!#nxw*WUz}Q-B7X&<|MKVyM3aZ z>Wy?!Jx!F~N3H6R0b~IJagsbuJ0p<1Q-NrB8LKlo5RRcf!M+tea+;6IKDfPJFu}2D z+3wVRp^ z1_eD&2k57hoRS9Jh(3;%H)DGpXlKgmWX~I(Sw(CisG&2by~jVveXP%sJUv>CDiEHf z?82c&ov0@hN3m+lraxjW7(h_*ap~pMf!=V8hsAG}2BG~Mpuv84--wE5@R6rsXD~6P 
zkbVyR>NKnTKsZ$$Re7@X$ivmvToY-?b!`VofT+XgClj?*r81&WvvrBS&4`jR>uJMw zCpW6oHJj!c29OyhgXdW9;88Ux#&*EwL9A+_1xl`5uwD~3*B{%PSM)53-Uuwlrv!C= zk1n7!Np7A+n`Hb^i@m7)Kl2^EqRRvqN(umfO*#AoH@BT{t$bWuT$F;8(75=G6lr8* zgb9sc!Vc%22ql0eac!VATDBbA>~>DK%K;E7o;i$*_6@n!n$#HA0)n zop=QwA%}tdA~}x#XDC6lveY#+;+wf$6S@9`igwe`wHvKney|FAKC|}z8@JdmOmmtF zC)wp%;S&J}a0*TObQ46KuQy+Og*SUAbc~{j)mu9~FRz;A+34G$OCS`u*gLT~%zMSU zKUYb7q(n#f)*ti}XwyyD1Rx4BjPdOi?|k-k*Rl%R;67g-`V4 zMT+qCsmwDRy9$uSz-ePL{eq4J^cT*xjZIJADee8zqeG5> zT_IU3E2f?TYX(y%O~rtA=M)6b_1^WT&;QitMw;`lUWpz=7eZCcnF(aE_wWTA!}phc zb=d;5CTVz>`goipe8x)1k9PW|m4Jfwoo0S$lfE^Ns68ik!evXq<^6BJblL>2Ra zymZH=+6MSF&d@W>24kbhmYJu~G1CgJuC>i4+1U&QB_)b(nt^0Fc$Qr8MimpU%Q2;; zrL*9~DcbHxgYhHveMfQCLKVfE;~%T8TTg(i!`De8I~}@OK95 zI5=oX-~{-3?{^3F%N{-EU;|vV$g%sU;tsg%7Ij>9PXW%UkJF%Poa8X+ix5Z8lXZ@R z>ZjfJjK3o0odW&pdER?pf@i#s_y2F%AI;Jj-e5@Y4kcA>dv`iXBgy2g7^|;1{o{Ur$}n z{h7aSMzeU^Rv5@`@_>xW#HT)(kts<_)d9o_|8s2#zt+^mtH_7fFNT>`#;3|?r?wWe-NXxO6Le%$Qex7V7 zM>c9FQ7{frq*fYZ3dqNtfLZq=`EDo2DCM9h&CRO|+cgrq%`kO8NtZOp+;ppRc{jPq)hxEdkPzTQ!A^j@$MYP=b!9Jh zgAqYQjjQMBGOvh;b>AtMO?(C@1X%2m0c4E`dJd{xE>2DmU!ttP|FzqumreDuT>+2p zk%>!!Z zW~Vu_Lq8e-C?NL|3bTs*^z>I=2)$dkzEQdiV=rh6z=L5^7trIMae!a2MS!NX7+8r5 zc`ekEU!cyT=~JPvfV;^h$}xE>rvC4EQayO>w690|4j0#J1u)fa zc|p0-NMCPP0%PHKlT~gAO2Z6p!n!4kb zd@YpEIR4NE!9Qef1~c&UYY{!{MNU{Z<~~e0fy)Ul@_?tR84gbihQ&<}HWX#4`BeaG z$8mWFQV7LKW5C&5UxLUgEAB)u({Z{MAy$Nmj@}&E{h8YTAXYE)+q^gman7p=Xq17j z{KN79kaE1#cIp)hI~W9A-DT#EN|{#hcH6tY|BS*Z<> zH0-x_xow>V^4dQqIJXTe=Q6i0?!1Gswt_Ko7ZHcdlpC9T3-nKQq_wCp@vy7 zUy)YB2&(G z7W$1-C!(VB3rkkLn+&95J#(|W8lV1YDqzIQYNl#s9OM)!w(b_5V-Q_#s!*B5p_4)pQO0@5A9>q?IFaigxg5X>zJ{+P44}9 zq$I0SGu5kF1_$*3DT(RHz1*q*07u0Jz@Xz!FayWyyB*_I7EJj6TsSou+Fp9EU7GvW zRXNkJQ?sHG>*w@$1oZFw3>0mXwXL*jl?>)8G2Z*5m76e*e`9&X# z{a6GlV{;pEVB4ejvF}k{!sFSme-ln$x_A2;vkIa08$NAs8%3o6dHScwE)I>l4*
    PXafvy>x_xNJgdM|*DLRY1Wxh?0^kC%Z=%=24{Fy!ZW&CG1pO2gVM`GA-! zoE7Z{i!EVRp5)5~ybDj1mnW`Of4GkF@W+)$?^}P!vek(JPio_)-I9KYJ0?taJ@yau z=;=%VL9#07C>&&_E07fFlap-gr>Px(fnW~ySLLrYW+(dnxm{#7PQBWr%r0r zIjAdm;w4<^u4>_P)m+-+{!x_71RO!n6Z0rMSD}dm(1Fu!4r`EN_0PpKY(y1Z-ez;RZ)rnGrIlUWyI611=iamQQ+7 zMdJFK@S3dqv0Wj^)nAKtWF&zZb#(Q*9L=)>wr3w}`~J}ad=7qK-ujtDjiW!V|4+_2 zyR1W;7ZCpiq1dO6EGM6_*OyZHT^IN6XPRd>!7LO{U~;l5YhHCuSn`e2PHdD8OIlh! zFDu&~EmGwTzW%XH-E6#)0>JDA!Pf_iK+?~RwTj_Oi?h7uQK2X_gCMg;^Z?%Pc`_RT zY5{5zmA6;Eek(%?G;lV5d>=jVGJy~L_QfM1^Lsbmmgc>97yVdq-X*lEl5$zf-B@#R2!xgoz&CFH|*|-91uo zSeo4s8mn_80|HxZ#ubtqC1z7~#K1Fcz~MVyh*YDlSn6swfTjA2lQTYqivN*UYJ3mh z@2Q^KRC*v+KGnkb0v5V4Eh%CulUkD-pFcWs@itFUM~=mHc*`5HtKgG*c|WKXX1)1{ z$5~;O?Z+~d_WC&H{ZC_iDz4sw0!5L=*T7RCWz#x++sJqflg@kszjtcmtyjAwkr*%* z_%*Y1icA1Xqo?eA{=Z*CpUmG5^o>$hCd~v_{knrTH=h-AAR-A&24c5PmfRcfBa9wQ z+zbF>tYijSpF83Zwz2MZH?OkZ^7l2gW@ez~Go{mv590vBjq^#X>P2#IehKhdTaIQK zn2nLY{_-NeoBmgI5z1olyB}{Eb#27$P7&dofw(U}KdBaJ%RRfVY>=0`#vN43^7>D1 zunYrig36cIpWJ2~IglnYV0dOh zD0*tGd1{sg6KDyyU3vgD0xg;1QRXJe1TKjZQNeGKeYCaGMsef&xgwGVP6{6Udef=*gi?ip(wNaT z%B1}_60PxNt5nsC=LAi*Y=HQ_#Bs>rSnQmW#^KwL>B?qEy!4HEq9_B-wjuYakda>K zx5de)`;E&`w^X}LCte@Fw7aR73Ebb%ujR;|e;X+t2cDVHNY=zZ<3gq9xqT&8cSp@l z*Ms8Xf7lt01kbnmF$eKlUw&>%@2EV}7K*jRc(x0dzoq;}cLv>Q@m^XQ>g@XWaL)|c z_&ZHzL|#`{m(g%R^m_N$!SGb37%p5P)vE@z46_{Tp2yLSqIcEw+*k3VA71Plam+Pu z=|PxWr}IPdA)nDv+Pgd7qGaZ5{WlL-SlU@xkuv!_X8yFkZ&{InVokou1t_+OaMb?;JVTI z_A2WY0gRT0hT2-kbOaS$O$@mz+2@9$j>%*46xU<&dtOIAs6c3o*V5FaGm7;~^-L3D zStWdYmpP@rn#Q8Fp}mwiq^43{kJhU^oS5|XQwwXsi3Us5g3MDkzF)`Ix}QAW$`S}_ zzgAUv4_6K~H}xGQM_kQd86-GqN-9pvb~vpg*t6{6s&Q$8{h{k5)#-BbK(|J{PktuCQ1Zp+ zYE0s%*4JH5n9|t3#V$_qZFEvuaD4SGrCkzg^tipw$t3$nDLE#8{SlnVH;sy%7xqi? 
z-APs9#nDnR9RocDESHyYH^LOi#jaLTduFHdH#jGos=BF1~6h43$!xWt@yW3O7&6<)q0kvoZOWTkEwYo>mV_v-lW?fB#+&WZZIeyEo?>eJbGb_@}Y) zJW4J)Gj8dBD~znuhrVLH{`rN zEHddvqmK8;o3oSgAdk~|0@dg7znJK}x=7I9lDzb(88HHa--qdaWUJ1<{gb)#;x9D| z`j)mAcJ&v7Zu@*{xAAWnoxOntQMG-_V#UEmc7xHu4@UhkOsi3>?#syUAw_x>akcaB zIAM=Nk$ocgLAA^#IyMBH76{|+g`*;Mu5Su@=Tdo+{OqO{qD>f0(_xI*2F{&7A;B3u zV@OCwh_(U=c8WXUnYaCSBMi>^qT{zbtc_cTCVfn;I*jD^bKRQ*MRt4nElC(`b{r3o zY+E!>8+GMg)RMtOM*Sf3)1QWdE!W%h`i}DP%STM^-d?OQlID!*ZK%QSXd7>Qov$qH zrEi4=^ax9Ql$u?zuo%H298adLUdR+x5sO+iLJW$WTA@7loy2PWHFnJNYEc$V+JXfd zD%bw|!tB()G%Q$aN}O4HYY`-&uXH+xS22^O);?%U8TOYu;|)HUv2!{*Chr3>Z1?dV zY&nqff6jPL9_AiE+Bhw9Q(a(P?>4vS8|Z{?P9Rc#Am>`JtJ%%S-Pa%GiW*8h!4J1& zJ-;oJ(Ps9k$Zt(nF?LpWo9szLxtZVXo_7uu3*md*jOE;0nU36eh@CWvFDjA=2zli8 zVe!v8{m1+*nBK`T+D zd2G6S<+pRs-E6Vw|L%#uZgdddvGR#8swn5uD6<>;#L6*%Iw?7MFC(*1BF2xbcV)dU za&qtTA=U;*v_`Q|^&c}OaGsrmTWzVokn+SIJ0Ow~MgToJIdQPxKGXd1L-hYhVQhD^ zsKEE>=Bb=c&1$O_$T~{S%#4}r>_Biv_LL4!{v7SP?sS8Wgz8FD#g&wg6V;aERhXAR zZOw*{9|6GUk=Sq17~aLP`R3%EO@{`k1-{{02~a7a?;RKw?rPNcX`C>896IJE*LZs$tx);z6N#J z0eJV-fz?A)JRXVvYh~aD#Iwdyd;X>Fj+_@`w@3sHEBOn450FH zm^iIZKGG`-rGk>Y*8~m~jEP!M_QE;N(Q@YI^0&^}K&os}rBfPI{QXtZ2e?K(_*C8nhpG8hBFxO+<~GAx)fNr5f9pNscA}4tP7#+CE>$ne;*$#MtnP`?L3w3m@qU+L z`pBjAR~XqnvRi=?@5HW03hoH-R^4S(a+vrsHTP9Bu~Z#w9*eMuT@68QyRc-8vl|%9 z+tg|+j8f%Hjb}#=y{&#$C2g_bZBW_utff>7E+ZrL-OYP7H0bBz85yiAfp5;HmUw_& ziHcVd&hs7)Jlo)M<0NG8$pi+I-qSEtvEI+qYxaWMhmq~w+G-{j5^Dfzngd;OtLb}m zmwJ3z61SXqBC`_Hl^33^%D@ez_=k=IudNVO{CLG$uyixzlF0VZPoUL&+G*eb*k3$5 zp-tC7E?G{b$ihk(wo*VT%S*O81prZaO_RRZv4?dsmkg5L-f`I+X&Bt-$~ zp`S9gdX2(VbamfaS%tFb{8@nB>=!WNd#tFHzyx!^4KXy9N8*rqA?iLh{u$7&F&mx@ z+@PbcZ_47Rvhpo8YVPXU$wpg*5#TNiTX6m59UBw#hW4buLx8$>4VA<9m$*0wX-9c#EMTFNDj&^Lb3fmBk=fmY{ibKfB zAC!Ka6Tzzn_bp8w1Z=I%^1B+eenp&bY`d*N5CjR8Zj}-#=>`=QP(nmX3F+<_dQhZGx*0)Ax;rEX zB&A`1p}RYW|3>}J`+ny?b2;cWjMqH#+_m@GYppHn>?{<9l1EK+8Q%QIe}4A)(>-&3#BCFzkB=XdaQxkt&h))~s?aNYC*29nz*o*pUhaj{OFc3>T5H<^|_uh49R 
zKj+Z$SE;cVrm38p{hphvucyK^_0no^obmBjMr!}QvUn1X5a&*4@9@J#itbc0Z6vx=MGfRw(fGgJ&)-dObpCqTsxuK}$Di zZD|>mk$;&|n1$v!R$^q`>>yyLvy>0o)M7`BJ@Y+;M?};_)YeY8TUyPv;hQ6q<+emF z8{2=;-@wdXLPJ@QFM|=jjqAPQK2j$O<1#2%!@q{7nkCnJHavWY6Z{$+jy8|TgI;Q`i!T*O-16&3-543?{AEhi!1(^# z7)LXfaZDrF3>zNaN_R8~I5pBcVqup|nTAzs`aHe*g@o`%e|?T7*0m=XrJb(_&(^aJ zWB4u{1p{EglMV0-)WcLRLW@V_7mg-Vd5WL$`Z!tz`StUR#`o!*zi#QBSxFi4^okyc zf}Jh>(vx{*Dv$XBMj^fBYWYk0?w@63J19Jlxm`FTNx|>7tiyUK1yCT0)2Um@z#sj( z_jO_tl0xI1ruIGBP~RQx;JKs<@gKTnJH5=b`fJ18q9W5QF31zYQ`Fq;rzv;Wh70EE zj*z({hgxnYLYo_Q67=Ha!5#XuODA^pI3GMs2B5RU?>FaLW0Da|)jrT;TEeMIL71Ch zG_JyRx=?BrGqJk-S|5YX=)sq0+JhUqZSLYOp$_Sp0*fE(L&36rX`;^3fU5oyDn&5D z{O0%eci)_!>YF&~Q)aS9kX%B}R<#1S=gm4Wo25ws&>o~1ZdRXg%=b7+sq=iwh9-oW zojsUKPwI{G!W_b%sB-sbj?mJ&{8{Z|l|P$lOFN}G!H2OLunTCoHj{)k7w=Nw;>FM&v-PM z$Og0J1L;Q{P}LhwlRNN2|j)Eo6Evr+zc z?~2%WN9Vc)%oDY=ck+AR7B&^yV0kHr!IhtcrkrK0-Zo;xl*HPS^s1?EunXWe4c5?S zsQ-~)Utef0rV`p23ro+|RN^FwvPgUQIt7FFHipSEN{~Fi{sTWX!LaLO(bReKoz8m9 zPn2;hA82-Ieuz~crJX$&J5pz2WsT6G_o033EL01^x)^OoMxutx7g4~t8a#Y>9L7V> z*E&!^)MB5&Hc*i-c5K^JiKSlAJo7_I{S4bXAOJTpr0(+3nj@UH{Aks-+D2H@*MSP3 zS$45!3y&(P+p^8jP7fleR`#pXMt5u|gMWeQEZ?p|gw%JSadE=isV>SVgy?R8IgYcz zD{%7w{;544ng=PhJ}Z}q3EL}M$lp4mZ^F}zQ#Z`bxX z(PKPLbgy7Zz1aRXkCB{`_n#PZCRek9vS36C97!34Sznms5{f>|)MAM^LxjwjT_-IC zKn>Ud_V_(pTBd-w`4*ozM{;GQlO*#<2$M=+)cpK>b9FDu9C)N;Bb;=M;Q_$cLc|6Gb7mHyMY}k!P}~cXm4o<H~e7WvY6~;;4Cf`=lU< z*>q5!svM#TF}++9;rwZ7X$(N6IaX=oHFM;`VbM+YT8CiBq`%DT4)pf z)|~#my9Uj808OmzTjeaxJ zO+QU|E30@Whu6?u<+>l-xZZ`lMt|RNlPl_p{4?P3C&r#x$_4CT4P5t`9nTq z=1rJtY%0Lo;s)k5zCDVjf1uX{;0W;g_3IzOBgmgibMU@AGs0QWlB;stNcq%bRBu-3 z+XanJu+?F}2+n8GSXoj@c$M|Wjet&mbnBN1RDMNO0D|4k-C1YLMd4>!L?pLV1vyJi z{|A(X^H=%(3zXeuM>E;U{4zC53?Nh0+GxhOZ|Kqro6CKCmKq*El+>%2#CqS+h&ib!)|`b?0ywMRSfBt|+Itj*Xe`$zSUg-A>C zrq(P=9X;gOmS~HP#GpfGk**av=+vKAU%hJ17{B}*XXAn3IJ66Y4aD*Kx$la4i#Z9; z1+LIo_V?wBJ5sRO98QmE;1A^|`WJr1zDtg`+y51n=(4&JbYXuDS~qKoRX+j4Rq*~# z@E-C`Mfu~O6-}974R#g!>;E?TwL)&_i zMBe_{aqr(hQxwCr$6)Y0o0vQ*{O9ce8g9u_ka+X_Z)Q8F)P1WD+!BJv!);wIkz>F% 
z=igum=-c6u3$`(xW?yde18f8l}v++AHE`1@`D zrhQ=&e({)vGJOBJ-rrlsecMQ$9Nd3@z_Vv4a$d~RzgPR`uXei6BzJQ$3;+8?MFqQm zH_~ZW%u)Q-cE(P0Y<}_cFG&xGFgo4GVU55Z$tIkPMDlsTWj)10Ez0xLH?CfH%H< z>xa~SZfZ*3wdmf-rL)17w~>2?h=TrH^_!NpJBJtft}{(lvI+cg z9GWiHv;LXu73aQm1dIZZCB>S{=LF2%=>LWD+~37kbccF?`8)5o`m2t)SL`hOKc%F< zBT3-Di6;JYzxJ5BN9@QgneI4fM%V0DDhZ*)u|kN&K(qy-VrSD;QmxL#Ajtqx2CPI1 z2=mI!NBvUHw-PwZB7VKB%j<| z?1%n{u-_VvK=tCK9f?&;?>Mcx$${m2bU4Yk34KKV_Ibe7IJJu()USyH!>X#B-fmv2 zsQ~oe`j+UtgpPK#da1Dm*68huHICqO_of}@oqZ(?wPJn6JlmtgIEs6Z?{;;~Sxbf2(e= zC^F?@r}p&ct+wD3I+z=8NGC_#orLbTQH6#KhaCPeTTst)J7^_jU|?9kz4uQ*J-g#{ z>^O@Fm}P<>o_2MIfn*3A;O!EOpi>ffpxm(bp>Yjb9u9d1TeW;acD-WI&|(mY0LX@G zu|(kk3(GxvJ)x4NuCJ~YfomF&2gKP9JvKT^mOy7k1nYgDqj7gSG!i6i<1Bxd{@apk zqGb_ zLUxn{nGtQl3DZYsaeS`Anl<)Ib7AUe_A3)f>7AsCAk&cQVt-f4B;v}dtX$t-oSTbZ zYCh&tu*LI#B5K_p{0Uuxnmx;I+j%t{Xx5b)id6?lQoTUYa)e%?T9=fl&m`w(YhevV z1YONxAfqW4=$)8V@hY&jI7d@h^O>HyNY3 znhlPB{%(jJ+ofn8Q*#va%QU83bEAK=5cr#=z9TG;B~P4@u&Uur$VQ|O83yXsOblTq za#Wy?r#GW*^!xBodtc~qugs^T%O)Mt*X7qc9+OLg!aWV(x@L&Ppa0$|ZxXOA6RFf7!o=LI4`nb5H z;!9#;6VT8}frF=@4)rE!q1%v0J~QXZ>d1oqoV_rheSE7gn42X^rS-bKdT*RdkVZb9 z-z;R3VgbV%!sai-VjA}3+flJm#?h9xv%g4aI&poJ9BZY*-fc3vEbJuS)T9xE41u+-!oahf5x8PNBa^@^<8`X`->XR8?>jQ&L7GljH-un}Z>byvOGc45V* zVSfCi6&LrR3c0X6$geVOSrN7|l=b3uA2-aow_CZun>k``MwBJyT14RgcScA0?~Jak zmM$3aG2ZO-wQoe;*xvTpg^$&@s`S8i_UF$~Kj2f2nIs}b*rl6;FQw#8hNSXumVN3} zNgA-Jn)uxOE2$X1c5p}Z<%{4q*G4@#Ru9QgF3&l`??l}2ZeqwxTi^5D0x)c;*+_C= z%~m|(Avs<|WGt}^T1@ojXt#(}LF`9c}Gvhr;&U<_!+<96% zv^BY6|AJ!>P*D#kWwNA>l4TQ6O8Sv$$gWr+#@%BHNh}`2eN}s7C^f4J2VxI%TvsR; ziM?a9)6&|yMxK`9v0bPpBR>prYCeRpyj5}8IdKCFAt5rkR%La6;H+lA>-qWk8Q4!e zwoL9i)+7O?*g*>Zm@gCazyo&80Xb^V@Fr^IPty4G`*2HG15Fe34}T3ocAulN z+i7}yAM)|x#|(6X!#}mUpn2xj)X(K}Co&=;A`1Rr?^pcWZuF_;k(zT2H7A_7X+3SE zN7^KG8?F;zfAoZXI(OUM+e3$ObzsOXE%1H~hwvMxV&L)$mYAQn$9j;wm@M43``p}Jm ze#&1vEfLOZNzkn$axr4O7a8!;xmreY^TNIGY6eEeu-MqHwz?pcJ_ElL9=n3raN&-H zR=7Wl{{1$~%=Q&t^Tc$jk{d4BzGbhBp`wOfT9NOP>qww7Tm&^qNd~uCKt|8n(Wt5- 
z#+xEi#K;xmq}Qgd=$UDbjSlsE{o`Y=S=WV@va-C)bAB`qSMH*o7d}A)bo+{L%frC> zn4Rr^z#7V>pP2IcOKMXYAN-B{ow@f^eZk6`oV7-gNQO;6nvE6>_UiAoUL(X^rqim^*gn88r97-jL70jWG&3Z~y){0@LRW8*QE zOl%^@4RS@ojafNao9Vn;jPe{;cBa1z?r!w!k4K?}7}k`gt6pw{O3JrUjtJ{&k2TJl z{%xF^FN|ZrlpP)LFpG)$Wfp_aE7t*=T;5v#5rEgE+G?rNic=JH1htkQ|*ks$XY(4{x$QQfDh|Sm0=Gt6HtAc#tD=Tv4^N zHHs&Ee&o~pLa}gvWL2*;0u?}C6?boLqH3RNm)qmoR~ZB?1K*}`yP%ywx8B~*9W(~G zLz-fs65z0T?7WN4r&7MCk?+T`H=eqRmXtT=mx3VZ)`AIrsR!C%F5Z3J(QAkG_9&%? zS6+Ck(tSDiWQg7-HqUW9(Y3_ptdGSDCt4X=0TEnqZ0=+SGw2E^`Awt76)38ch>(Obb`s~iObUdmf(NV0&Vqy0g;06B!lrYia7eda2VrI`>_4LWz9^%5=WAIToXn z0E2^ zFh>?8edIjKOkI4r7&%Xzzv(~&stW|4uecOo(J=DyMfDfI0VQ#rZoA*f#I6yq#i_!C zOJ`E;alhReo4|SVk<&H`elp)&^w#O&vD5q`Zf?im{yspfQ!6)@$-~@!KFMJ?vGnao zEoO4_>yhTsuTmMFbJW9OP?Gjsx+I~{diN)Fn>PmQ8Fb7Rj$=`R$Q6ste!Jub&lf0^ zenm?ba%-t-2{x_=?vsMi@cN2F{^hVpK7f3-pvP);h9nosZq@}mKq6V}xUnj>`17<5 zIMVJpHDIs0S$2;PY1k+qwIlzQ5=@tV_XU=ouhsh>b|0wPG#21b{Gs&Gc5a@)EgYgX zD?IQC#V4eqIR9i)&5&Do>5+VcW648NyoK1 z5c>8H+-p0^rnSpDq5-Y?PxhboaryrPeQx~CAOGCUt3>i{Kde;PLbd0&W%7uG=mWGpDiKwqCN2Xa!RCn02B#HJw_<9%~!xaJDN$b*bc zOeA3wcY>bmar_pxb)5|thdBovH6{Z-7y%0itA=^-Ja68DhBdDgGI>(*z07>R7Y*wD zxHXfv)$1MB47mkz=>-I&T>t&i03-WI9gS%nNSe27uVn;yzsgH{?C(ow&*jP9ym{9( zQB~5eR`%5OFTM8f_nN=OcTD|*%%LWhs^HR<;Upm;ra86sCVg)~ms;EKdN&$F3$yK5 zx@!Mgwwc-Fz5j+>?qlaa*+ScN7%D3@L!f)+jU}Au^jAbiMxt4_tr%f|3LoeE;^LPt zUmgQm!@y39lJQ76295v&SZu~_;suAl!HnN?;y0@C#JKSH0tjJ%LSy&YS$U#ITu^J5 z!OY^~_qeyLWMpsv&=S(sb)ezWLBjj;RVJXKaBuGY8vosV_Rpld`iY(L#K+U}SE}+n z0kS&00d0+EWSA1y*^OoVfgt((UbK#_sI7_f>-4})N zue1Mi8TX}V>~`Zcc4C~O-MNCh8-3mvFy`}RYV}1}PFByvI!bWLJ-n;2m&k%5&Uc^Y z$`{VT$4R-ye=ZuIvgO`U9bip^o?)|?r_jID&ttrre;?XS{G#E4RH@>a;C)hT;zs3h zzHcR}DMA{etXOEtq^05@A{;Vtc+Cas>C-6R!cO_Nph;NBT%tIq4gCB2lihA;@x*W(;#B-6 zPcqvno~EpFo#tyN9CNeolk#pNag?sW$w25AUw0I}ww*GZODeo4WVfV|$nVSJ| z=QAZ;td^~e?wfNcqlcptMmJb58d$C58QEK97m5{}j?QGSp<%LUD1DzVkQSra{c*j4 z5o)(*3-F<_IcI<=q{nD9N)n3poFs7sF+fvXj+cTjry+FSOpTw{2PzRgW6 
z0f!!$x#itmh^{8m@7kh|at_JMNDJnnjcbP=CBJd^O-#RSGX8xG_07tHhxu#V1{bX_djotQpolSZ4U3&$bS0@>g$+?-q= zl0Zk*nt^0C$z%StlE{E-gr_$cbp8BvpeE?On#`(@rt#$&0!_{)(FGMJ$Pew7f+-m7 zG!>BV9NaM;iYrM_C&=nqqiI-BlQ&+VUps?1ZPkpO3or4s$&Nh}Tp)i}E$2Wzni&?2 zTScK1%VmaJVu)(#iW^W1n^Tn}`@VIulafDOp>JWhSS<`|$a+sGoa}^)25OCE;q`kS z$(lLq!+Gz~j0dxd4uk$2BCy16L%sD(?SX3Lg!W=Ua;ra`#Rle8hYz3{zH>6KUg>p> zhB=AS)+Y&m+zsIF5vmywuK+0g_w4M{?CgNYkG=+66)%Kv)hWxGPWR9@t2W=zqV%zi zB+hwb_@}gSTb>4LVXQzl3B+CI-{jYyc+_WC2V`zF_#IE~g@-uqyDXA$^gsibe0owI zB>oU`*Pc5prKq(ZX`R?_A-@(wFh3<{}s44m(x?_|xq(Agd1P;TTYDQLR%xX#g)3#9B$p*;E?LW!GWZT#nmt`htkvS~q^kCAKh08DYE|s> z)r~K{o{oalX!gPdJV`xkT_cxNbTYe43=Gf08^CaMPdJxU0*?XD)ss)q8a=^#_dX&l z>!^I|Tr`9G`^${}oGqyNa}Nm+fMlA*Y`TeZ-}Jnq(bXoZ@AUV#&wYB{ z@WEyjpCYs3uE1`kE*w|XI{}2G20D)OHZoI`*OUzaq*wdMOx4|`qJ+IS7Qz$dYU}7) z8?j!i9Xuv}Q6nye4rLL6umK`N<8=N75N7urQqLWxq`bcMhzR$j1G!(KGl!?*d_UB# zf*md5Nkc?!O^AAcBS)*Q9c2k&Uc1n0sKT}PnY-PI-I2au-A<+4pbPLshzmCoRT)Ya z_ukqgraU)p8?#|v<+dTwfVkUr#L5$0*|*GG-EzyQW3&Aayb71DcSJBg1W3SM^&D;M zHE^n_Qhx52aTA(T(Gomj$hBL#b2@3*WUv;iJ#x<a9 zhSZh<EheW8c}xpl+HwNZRY|g+RX+P6&e8YVK7e{F4yp? 
z@MM)ivCbUt#jvk%qIq|6AaY9rcRY(L^7tVEsx{>rZoplRdmhLUtXLvxzoR#5%l-@` zlFkj=l5!En7{e+tJ0tdVjlo>!D(#W6FJ8);^-yzrnL!YUl;{3}veQDIUfFylMq!ZF zg+y0*=9dbAnNE#wRgMX^FR(B^qYYSgOcjpdrQB-lqxHtYf|T{HVl#DN;$WI(sx*_> zIc@zCp)kN|dh*f{lYq+u+rkBZ*84qW$$lO-MNwLAkG^H4+jV4ZiSEd!GE(`BMq94oEq!lFGlt^3QtwdyGR?G9_zgkR-!V?qS)Q-kra<*@90Rhxa9QN=VI$(0l zX6LpTmVD=?1Z2?rfUgLByeG+zSu-hb3tAKQ`K(JuM3_WI=dUmkJeHZcH;>cBlEx&d zZo)EC=w;m=)cQv1q2OCt8~-FGe-7EmTUfH2(bOZj<@r;e=x&h6&Xz$vnr(XPUNpRq z***5{+=(VtTGej#J(}!P6*-cXrfHY0MwJc3#z5F(Ci=;&bZ_r1vCTbhFli<^A$}Wg z95sI9W&+M_$(JuhXHqqAk1A=Z{LVk!&1egj;gXc;d*){PkSoOv7Z(~6f*(~VR+9E{ zHZ)~ufs?!Nh)cir>}~-nvTo#Aew`#I=oxOfbKI-;mi(I1K|NrNL&tl)ZlCH^^lB5m zn!P=l52%$~MW>bG|3NG}Ljw5i+Hs}<`K2$_nH3zJW{gmgWGC*!_!<{b{VdEG;9xhL zsvUE6Tx4V%-8hVgw?Zok7yqgT`YOu*9K`5XAe|;~;LNJ@tFkkCqpVyOvo|2eYc@=s z&`nxZli{$&-=+PiLJI+=&gV~0x)!UM$!=nL=^GdTj#XS_i(EBXB_=V@15HKphAGcuz@NDy#R8k5Lt1f_JQHr>%v zOf^A~h=AQy-hp)0#GnrSF0QZ8IM{P7Eegw+Kz{gW5hD1c4pit!`7JuIr_cAs!{YB; z)O;Mt6EEByCKan({lpSkV01Nt+XJqp=*eD&r-6&c&hX9d##tf1G8M+ z7*EH#5~5o9>MZ9<(=QEV)E_3f5YD1LD=eSs%3Z6prvgQY;_V^D#O>9ReRU53E6!y2 ze)^HE8B8nJZI5bLmC1IN1iCbs2ismLc>X!3_PARnjS*2ae{!P=VKa% z5TI@cu^!^u(Cg zcV4R&ircY+phY7BQ{Jzud9Y}%qfDWtM4-fW!B)j^-CZpKn189l)b#Vu1~$KB$i?pr ze>uXBV{@$15bZm%0mNX9pPilLXD6BGk5&-Eo%|*fkqE1`Zhz*)Pm`8L8`VR2bX<|$ z@A;J>Di$K!ZEKQdb%uXA0terJ%L&5XWpu|jiC7VQ15D`=q?19x0&wM!eDVZcKnuzW z3#F3;)I-0}K%(4?pc8$aN zGN0hR0gvYPd>J(4&Y*G&V>Mt*VPmu@6@EL_y-0>=Jli{Llg0EnYE(E^i$L3S9ym#K zoU!o(Z!*fElT3C)%y= z1Xpf#a9l-HT~sn?0#;F7175)h0ZqO&lBRps@Np-X?IEAwlHQ>$MMW(!sgnlZ3G-3R zZ81p5UCx42%c7;^mFIhl9X>&O9Y$?HJ`yTsk7ERLFb7iB-A*Cwx8jA)BnwM~`lEC3 z=?5LtnfExiFM4jh{t*;<*{Ob!Jlnj(6=@#Z5x&95CrOAscIp7t3SEQOZOJoDs`NyA z>pAaq)3Fe!5~5#FUA8GWTePs?InU!ni|w*0UG6ayT{^{Ho-N;-m1Hzm=S|ib2*AOZ zSzkFXJbt-JEBb&n~%LNbZ9r7nH=kid-Eqa+e#?4iy=>royz z;yrRziGAU^rt9XhXIKS68a{M>CP2k1tm)^vh%MWB_(j6tkypH^3{0z+t55SeD*lpd zK99>>I4gm5?RG;IP7=qOZ2S-|nM5lq*_O*jwvIsmR%UeK_;*$dQSH5zbB$~7;k&zm zmZwt=7#CW8%xZRiM1=2)O-TYVvt1v5T_s1NWp}zdDlC_$7`?IBY0sB$e24P 
zwYir>%$~uy0BZyVl6FmZ%M9l|6+EXy#&YvX*%vf27isgv!$$c=`>_gH`%aZ7iwNh# zII^mYwV{|Exz#QE@%h_h4{uJ@Zz-Gm=vtu-hDPEddp$-|A~gv|9^y({ zQ+Ah?ot!i+t3x@X_20+?TYvT;Hd8GU9FH&X=NB#XkNw~_HIu9MASn!!^fNl7*WjwL zIjJ)&cioei8PkESFGW@GOZ8jhh5?Y-xoXDP-?oGR! zN>Un)gY5xJl{2RoaJpwUgJ#LBjd|$zb z;B-k#gk=tH15yYZ_JPX=N~kdvOx&6uwh19bIIs8!xkBz5bRrE}b|Fy{_Zt)gyVy&C z*Aj=|4cQZ~yp=fsyt(EIe41yrJu)r-s1(P5Ae$wD-{N@a%mXx~jPj+ehcqAx zz`@#XYdQ5|v*wb(A^$2edFIRVksxz_CNU>J<<-j3xoe)nPM~z4aoDx(>O?+j_x&Pj zJ9~j1iA&vDZD*E;e;^&C*m z{37FqZ`;}XU}vLi^;mSQsvJiB^FUQ4)x)_lH`}FO?%z|Y&NPth8gHO4#p)wAuq&vf zbnjsVWZnNB4{O3`sdr-sG&%g8V=Fuf4s z+)&FMUS3RBccyUOl6@6;dce&kCJgI{fz`2f=@*8|Rl5A8pHW~dfSO_N8X6UA5K;<5ITm(g|(KAO#Db~-Af2s ztOFItBDLIFk;P!?+lBD&Vk=-~s|1q3dOKsZ-CagQQP zHpD#Ts;JZ|vuyJHLATHn4kLUq`HhY)^#Sx)9*By*{zfQkw9Q_yB>M8i$pZlD_R@z= zh9Z$p2N%vQo8jq!l}q{90In7TebojUkdH~(5?|dd+)_SWf)=}ox>~+|t*sr}ZFo7< zAi%>DDB?TWS@R9)cmA#+H8qh;%O!et)*>w$ArvM%1W=p<~vJbM3P zOIE(owCs*i$NwDulTFJMl>$f%rj53SS!ksLcJ2+UrECH=Ts`!nQ3OxH|@D6V-< z8ZwKCMSOUW)^iI`Mc?=b@(|G>PbKDopW?+3VvX9;vKFPnQDc7#CVS!sns_~&x!R8B zw{Lgp_17&uk^yu7rdbhgZ10l6E}W=xb~k6aub#;0U{x}dR;;5J*#df66sb|(^kb%m z-XRT)(gOm|c?M>nk79H?8OoG<;Y-^SvNoKw<~#B7%#jAoTJX*TPUNLrE)O@iP(EVk z{H(hu%t0l`8=uPVeyEx0;eJv&2CKrJP0sNzG)-T-S&5U_g4mB zWLng}&QA{bNf#~n-_Luf5%|ujO08$4XYiQLcLJG|(3>AvS-IICR*GBE%#n&+304h1 zUvbvANoc!=J$_WUbte7ju}nuk>B^<2glD&*Sv7HfbT%D~L1sG^d3 zc8Sv`ILjj@Cbva(CkzxM%*}IcpWc7kc+0Tl#iySD^qPmh}Y>9%KD2qTUEg80~3 z&wU@GG%&??K1T>VL0m=MnPp{#qNO!1fo4p^wZ|T{JFSW1^jE4NVl77MWeO@edknI6#c~A~G+dmM&Ea95 z-@;M~!ufP&N$CA_aAc%R`VwIt+`1Dzj zZ}+%6;(~@q7i#SVrDb0>&F#GYQh`$drz)$(%*y%Rs0rQ9OyQ5a+kC@0E{xXsGMkMx z^qrRQ9ZEahS5G|N?)Gn5fB6!L`B?e65DE+W7W6&Lz)}D&6UX@=)@+oNHUi#h0htE~ zyhjsZWa+i`x|0{q8TKPRSzYz!LA60hu?yb8Y7M8$YwWV4IppO_ z2$)T$fpA1^l_!lz0ENq1QV?vL_T%F@F;8eRmJ1j@U|Hic#UCVOXS4~C09AkIJtMox zX)33k1;tXUk>nEB1A}=xXkcm8Nd_WLXio%KO)&`x#Vtu)B!M2@+Xoe}+jMkpP?1Ooy*TI5DZ;DKcwu3xAo#&Ty2;kwoq%`p&$MWDLY8asyd~v! 
z>BpTZLi!K-^_y;ubk+$skvmi1@w@+`ed1ho#3%AjNQe6mJ6?*72nVj&(n^DSV!G%e z+Hfl2MQ%REJdksFk~ombZl>kr8CtQ)NBL8q@uIS1dE`>`lvcH+26{qfhOugJ`^4hi z22&Q8*A7BvY85qo4}zO!9$D30JPZ{-Cnkv^0n+zE-G$8=7rPaL=n8P9Ro zR-Dk<2<{I<59pG9ausjHS7y)dH!an5(ASi%)FT&r_?GLYO#>gARnFq7RiB}lZB{=G ztG+1?%)!#8b{2)8*cN+WWyr%h3F_u!!UR&uE6v}m6(Ju2mEItbMCo!R#CPsx4}+7C z7>HgrZC}uWh9Hq3U@FSdhjXH6EGrVow5Kk42URs3p6EL-=B@w*s(XV)W^e4zeF~*0 z>VN8zyAlDJn#h{UrM8>P$tD@~*0gjD2e`<^VIroGpaH9y{P&zok&ceFBGG#=E>0ek z=R)_o_T84Le5z;-M`Oz{(GWtMTq^<8n+;HEg9|O)ei9P5M1=RvQ_Q-Zzx13hsQ_)d6 zKfc7e?%YzZHF9-Ge+osSa3OE>_8CfmYxNU-LaXjV>P^lC5fZ(>gn_HG?Uuf{M0{wYW<`FSxvP`ufZiB(12h!RPnI>BgRJGc*nX8ufM(s3y-8=L=q!~$P zrV*%batHe@s$(e8`7Aim{IGaAiuGjgnA%Oq%BGjSIZl=A!`eD@rw#rpYkY}-?@2j3 ztnFtWAFoJ`oiTIHB1h`ipHga>O3&VT;{K2-RDwb&BgD^xK>Mi0Zde|{zo&>SHT;mq z&u$D;TvOFwV*38k8gzHuXw4d)AY?ers9)d)BP1oA!EqTY+LmJ~*l4P{2$_BS^2bMu zfn$3WT0WFZ`NcNI^I^8)JF?xScynXrpS{;QZ>*;INe>nT*|xl^T9v}mv#D7i=y>i7ozr3=@F9Qv-|g>UhI z<%|z4MXcP8e)0};{>h|@VK4h#X}^A&8fo$G0&o#_wE=~haP$U|AK#Uld3lc?F&c%N z`qQ^z4j&rz<+xVka^An=P)+bJ@0<2}oeeu;FY1)qSqPy`k-ZK2^K@phptX>S)7{sP zLX0kpf(W3La5zk1fleU3WI3Jr#N6!GjW_!+=#Gi9MDmF>6Or}_(OACjwHXW{Q}U5r zE45o9eyVK}a8G?6sf(Q_D7Q&EX=NZ#N|inOH)$kHQVrv|oz_f|PyufV`{R!A3TKxN zM7KDPMoGB80)g(V)|%F(v(8m!>DqhxdMs1awl&jLEV+Xf8ZR@SwWZxz)MAUoPU)SMSd3BXK~D2q+JaC^3YUvwSz1Us&SO z=@%5W6p-|p=u%P$nLX4I;589g-+cdol@~BoMfJ=Atf^AqtiO=<$OWa>Ce;6Lsd0e~`KXc=5ZAD~du^IKJ z+2|Wp07^N(3E7Qf6Jz7ADJdU8aW~@c{qp4v{yRirSwLWc1th_INZE^Pp5LnN8QJ-g z=%5pzInF+ew7ITbR z@AE$)xmS=-Y+wg``udg_u>jSN?b0?(XD*kuz@M%6Y*&iy&`Gkb%FfJL-}zvIdUu2v zNIdhXDz^OXe!ch!QKsPTIUhpY@1%l`hrs9f^%D`eKks)Y3~zkcJ*ETTwX=zZzO$+g zK1@MFw_UIcQHMW)!oKxGGR?{TqX~!zK7PI7(VR6y={xOF3j%X~*Bsh! zj6-b6mZPJ&TufaA*rX-i=|qcE)pSsCCn4dCN zI%aOJQ5M)*VhdiS8jdQ`(ToGHA~qto_8)BqKlj<{9bZM8!>2Z?9Rq$GUau2V<|b+s zkWLQyI|V5!R9nFp=nY7#W7PS4NtB;o(fQd>G)k5#wE5TZTysoYuHaXr{an*?@$y`! 
z&Ly(q1+^RG`FWde<4z{s^LOQdp1L{l z<^^i}{xuXD1r5>i=&#rd^`ab~vQEkZsWHvs<-WSQdvo?9l^yDpA4^N|#vOW6H*jQp zf_SEH7V5WAqJ_U%-(Q~WE*!}?xm1^|6-st`#U=%dG#(Ws>mGmXZZQ|ye5$qUD%+Nb z9zP^O{90MX3rM|Fyu8%{n`1{#9p1CBnkMdE#wJfnO9!KZ7#5%Mc)&ac@9CDucC1bn zZ&^w7Xapjc;KXFFGD|$5k9%=c1xUpuX$YoVn?s_gZVb9gTQd|o+o7ktevM<}`OiuO z*3)7suVYgx@b#{E*cN0ruwn(E{3H1FAIY?7pPxBy2-T=+zYssFkWNUjs!odIY0v{J zHasE%B#TJ(T77gd927e{!Agn13<8AQNy3gyjEqsAny#@$JLS{b)8M19c6S9<(#``S z_c2kzv)s*X6{ahO4_{8Uw6s|dHLC(*dIcRh5epg&dek}^5v)#peD1A~HLi~HgHZq` zmucBXefzlMJrDi$X>`@3j_>XvJoTMPb5jHLKuczyG>)kjI^M0sJxiij(9I@`i+Bi%} z5c8}bw+$dxD8*p>A|e)dQCvvJ>scCkM&Q7okyQcA^VpXtzTEw%Q>Y*uW=rw$;{lM! z`%F5MTcL^@FgCY^Y#173#p$B|@Oi(d4-X!LQ;u^=hD5 zi+JXIWF!VUBY!R_#MIhf7iFV zap19%@V4;To;B$obp6kTDYCPdzBz|x(A??2`mdrll({h2y5(!IK5^}lpFY1U2u10H4OzPDdh+?NYv!{Ul*EV4AOIc2(^z?zOeIFA$T*H!|UGPUfBl z_O0C#hwWHEj!sss#ecV7mu{xKe6eCaI}}Jn?faHm940>b@8CxKc!4tl>87=%14vfG zCurhJ=s!zW>y6j*nZWe9>&f!vKXptf+0A55;o>-O6rTF%Mg961KNZKl{zJwaDaK zPf+C|&93*SE6*>nBsN)>385rut`>lJxs#LN#X+}!tJ*jot3IIqNhob?SSzv!K&V>4 zai%z))CQItG0tma4%>=l#(&^=a|?^|Q!W?Wp8$ExtymUSQSEQ-LN!v`9KR0>(}j2n zJ73lJI;O04k5_#3=q|P3+y;*O-?&=2Q`QBqPW#E2*OT%rf1(K~sr>UVtZyGCcL%~9Z2 zwOgQeqz0-?(V74FHh2aXSXn2V_33J2V`8YlNst7cJb&2)&j$dKQwqTdpb2j3;aI%H zLu`Nfd|FZf#g0&^^+Y}JoIKM7H%Svu$qUn5njSGmY_=xvUD%ktme-5TuIY@I@;`f6 z62U`VTB_=*+=>duEYGSupb&%C(H!p3){blj;!W37<8SmY-@=l@alj3yU}tv&)EKRm z_4T--&a2rZmGnTBA$;@ZcYDhsw;uVagpkXK?YVEw*XikBm5yZt-N$=6X-i8>lhGNu z+Lg`6d+#iR{I!%Cn;T^$xByu0)86r+@gnGJl4Byt{P9TA-otqnJQh<$%tdaHkQnDz zDR{q6jA+!}olEVqW`O01OT2Nb7`C`GX((0j#fl1u`is24d^;knz?($#EGqGjhXt~dn3L5z#Wv{RG;*q=YDWvcHSdjXU! 
z4WZX}9O9#*Xm%(9eiHWv)tQ!r#!r+!-Q+SS;J&BlHmceA29+yeU55Px_o8Cv3jZDn zG%y7~bXkZh?iWDab6bL_d>_Rpit8pVM6t4vHYD^!oIK1Em+RMIJFsiD8Go$hzHw$4Qv2fd#5zIJ!zT^;xR?k{pU0JRO#7--%ztRk$?KMVR#y7Er0NRpipI3f|~nR z69Y)ux+pv>Oh#QjM^qXjr}&hKvs;0i)N;poaBxsXD--vx2gK^s!z|~xe)w&m+5xCJ z>CPe7*$km!z1&xp9?or#nc%@FOfMb?OLr;wX{qbA2SlG5F2ie1@`5$MPZ>xlFC+@u z6Rm`-SyWNeZ31zwAZ_MNsqTwjG7yUpJZjmDv0=}4UiTyggEsR}*d~_xE<@a@27r@6 zQC)h=%5S*>xJbH=@4OHxZ8BoA-uu$AUMX7tBC3DDGALMqerc!t%BA=C5}FOlW-Gj!ty{#(%EMowS2=;7T3f6PlB11E?sLJr zJ5o|imRD9Z%dDLoxBAQj6_oY#HK=#0ewR;t2>MyYZ&;rY@<7*=45Xuyl9Ml83p`(Q zA5T1FgX{^Hus{Gl_x;D4EO(d`*mb?cNLn#}9N}arS78zAaVbJ)(8R>#1~|jEdtbT% zq8s6XfrX5ZE6$P68I_G59AlJ?XTG~3-K+4fE%45b#V$=@bjaAFs4MVo!4wZtwAeit zI4pzSbakCiJT03#wrrn}d-4QtSkHkWQ6c*ADQq$9OH^awYbi)z@)xe^{b!OCdz?wn zFMIFr@3(sCcWs)^qS1TW29h)%A3ZeJ6#!?TjgsVk@!(MTce^aLRU+9r`j&NfAwtQi zhrx9;HqE7opX#dU&wU4%Wv@~JSbV_#?xfMEE+j}#uQ`Q+jZ1fs)AI!d4mfYYq~6tf z#Osy!NF^0Lu3Ew1(gXJOMg@Xo-tT+6ISE#rBQ*f#Yr0(aHxQ_#mo-8M*6AQ70@z|7CJeYU^5e8 za{!?;p54DcFf?f2l1~lXTq)@TJR-5DM;f350k)2sMb0}ox4hZ1SScj8c&rgSoa;yZ ziJOPw3Ma0QQXo3Oe-ieP&I;8=(BA!*3sB4eJsvw=X8}sA@oUNsr(VPk^S3mLj2DR# zyiUxPU4loR$t$8icjTF_?(;kV){erGr;b{42bkc5i?v@h^lcyCdi!^s{Rh+1QpqCo z?pYr6)r0?AM3OrBl?GJagNnBPHVlsxB(_owzF4QF68g~;<0OqBinY3 zgVvkz7WEat9AxpgRHWIIR~Db)gEJCRq^{^EuL&&-eqV z^!;lgkx0K$hZ=k;SzHztmK6nxpS~ng)0uVT+imG2gOvYP6!?0RaY0;u{k+ecv)%J# zaseDyl4zpCU%3;G%zySZu(#fLyB<-O>qU6`~L{Bor zM9tJ9z#ldbk$Q&MT_dM&e-;uNx~X7oix$bmJq^^rzyKs9Iy#l_{JSdx%`>y|QCN3; z(C2Ii(Rk03rD9XKoTpCNI#^V=%|PO-wa{5{RC%b?{cF?$*|P7v9C=o_0j!DW+y$xO zQC#J@A}gTv|E^U&LVkmJbfS7*El7OruaWlc1`8a>H}}tGV`u+n+|s?=Nv6KCwbko! 
z4LDcmDx!n?8j`pjBx7tqtWc`QmTMcizhBua01F*NNEB6Z99nZ^MJ|92s;cBk=sf-6 zT)u+^kae=_l(BRNvX#Bi)wO69cCno7hob~Z`JiDu`tl{WJPz|LJ#L*I*uK4kw%j;i z==ho{;3?2uKKvXmeceHaT~awjc~Zz4h`|qAw=~dX*FlZbD%|lo>uyI!jjmGFS(i)e zCdSZ_1(&DOwR=<6ofgNrRcjuTiR!ngcAZ`pRNpPED#{$!Ue(R{cFo3j*x0DOt6otP zdesya7J?v**AwsVdJ^gZqWJUj%n$WyOg=$;0vv&1_3nbX?1G-~N_k*Yx?ypjjV+cG zdi8CkCK;?|Xyf=Pg(=*8T)1C_FTYW{09^6Ac0q>d;zp*huU{|M9`yuLvqUir|5V(* zdimm!Pfza}+vgFb&?K~iykf&wji!Ld&v9a(FkiKJqos9evi;{BJ3>NtgGo~3 z7PB(3{ALl&-Bi0C1RoC`6<5-i9&M)wJ!Z&`V#IOodorkf6``t6JSmhln)20$D^E(M zfJKpB-eSmp&Cqu|1OKLHz=Z~svK#D=?9 zoNN74z#{2Ovl0WY*W;GjdBgq_L&YO2w!*f9FU#cEnQLd{-jpB*XKX>tuJ#UvY#4~X zvJ{LxEW#VL6r{OAH0w#F{PX6jpcu0P|NilE09l)|uiiFCt5LxZDQz^Bxy!Fo5{{un zXWX}ty64a1dmmw*nmS~IcZ?(ouFULJOj_<@P9}{cf3yfhD`;AQO17aWk`;bD5- z#jGaD0&RBnTw2cy(UgIE+a+4rj;lYHyc1&M=H}92#^2d|e%NO61l9f&ll-<97UpPL)a#H78_#`f1Y_Lb{t4lyR?q_wGH8w^O zQfjNcrqfdvQTPAYE-E%d0ceuhS(BepgTgU_JP5If501bkGh_0hmm3bS1^9OVqk%BM zrVho`UVgQK)Q0}_nM>&gz5tlK^lVE)&}<5m6m25CdkP8Xfj?H&Rh6qo zkb3|Af-cYu16#Dxdd0{JSqHIEVK=h9*>QC_OEru z9nhj&_{sccXFI`};l=@uX^kx{B$qD&kDFyzkspmb@gIvOFlS(U-J|6WYJc3P_b24qRt;N)**XLtU#-30oxJ2e@DZ?H zb73s6Uk9pMYIQ9+uDJ$*19MR#6gOW&^5Z%!m*DEo8|V}C%WnldrqV61)QX}jC-A0? zgDOeGA6~1aQ)ZIXN1wKfm8=C1ns?IoTC(r+szBC9(#X5#0HfAHev`(0*;#)jo+T`1 zd;*|kg+c)TGAO-#GztB%-rWGBWw2dsPl19V1@yq9Ol~Da& zaNqz!&I}h%YOA!spSpe@H%}KqrT;#-$X#HPW^8er*ZPPe9_P=7M)QbSV6~u3dMz9~wVc6`68a6A%gk6t|D$I%6g%a(%@Yn7>u`pSKo&Q|AD_JrUgN4LoNz2`^t! 
zfhPMG@C)er9*AGxsX5=|pBK4npDordfZw0pOz!9I0OwynTe6A=Y<5z1+wn>!SL1f6q5Q(3o+JsKySXY&x*r0}8=5+XBLhU1rXjU-rOy5a7^c zm;hL~0ryW!ZNx-3;7T-7O7x`U=0>z7DwvG9OhIhh+axFiwo<(k_?Ax-OhH(~8SC=o zpj}(BG!Ta2s5w03WBR)*|Gc)^q~PX1{=MSCxtIvQ5-fFaeP>-%MtmDrgRn9pSStFb zCH8+CL;8TsOS}0&^2cLz01Xx29I;i;(Yo4 zi^v$owbikt@$%S%?zgglxPx*X3(b66&I^acj4V(>;1%!V<0FR!r8Afs>HWt<2IZHJU83p!1v{+b%rz^opw5oGp`|4JDA}gxw&(rFO=<2w9-)$ z&`V0`;h_!s(t5m7 z0=3QJ{yNL%FA`$gwVlY#^nKp*WjX8QUy2pzsRUZsKU!gE|5aZ4-(Y{a6TT=1*=vr>pB1fPcj;==#tGSV z_=&y$gA_WF&*KY9AjrO5lYN4xC!=62ZE#ChtpnHe(oC?5y86`zfLsg_rxRMSAF|(l zt>&aBoS~4;N*gI`YjBOcJj*N_y%v{NeZs#_Kca7Lt!EpjLqqOhj(=y{=kO7yl z@5%=_;A>@GGBV69PGSjgq=;Z)w|Pmvp|R4OonjzcATc$vUdp0Y?u_bA6aq2^bajs#kG%`7R@*dS-S|+ zxQb&-C0uIVIuo|N!r?I*>T3Cpqec)whFj<=)1OZz*GDJj)yUnZ7=hA*^N+4{vI*S| z|8kaAVZ~do6=WG59qn3O!<@$_Qt0&SXJ`r|G~8@uzV#LfiTq->?!b+khDEQQ%6HTP z=^x)kNIKug@^axm741`5tFLNbNpD%HrJpamt*{Kp_|-GYUgenRdIg4vipnz%;E{2s zE&g{=(n)D+9qsZ5W;<8)T$e^q4>bYP%H2i6su1;DRkgZjmr0)7u1A;nXNEDkdjjkd zKs=yGEZ`Ggix;E6b-&Yjsqbq%qEyHuaS|Z+pcE2yS#Lj_VDLaVF143d%ScHZ+hH3? 
zlxBB?PEP#cvbU#wxuc8}u8+GQrluai9_snWB?2rQxY0KupMQyD@E=~OLvO0Qc$Euy zz(z(2q&8eZN3~CaetUjuGV-$l+F6m`OpJ`cIrJ+@;Ub@nJ2P+>r4=hHJ1oF`>n*Bt zz97Ituh!<&TDyeABK!dC503EmCB2^}hz_+UfLq_zF?I2OB|tt&b2?#~f=dM^l@Xy+ zH4=cv>0L5{PX5KFAh&=1tRSEaz zUVEB|4pM0d08~yv+UnivM8B=a_)d9dW^x$GQgn1bzATW z3l|AZXhtntd;5mac0>02n8?AwKLDMAN1AW2xt{53Bi`tHTx?0VT~xnp@iN(WcBFQHhvk3NA7i+%+Bb&@8_`bv2`kDgVPwvN`GaJt+KjT-K6{vvw;>p?4 zD_xC95U_yH&V?%lTArG;GoCnNC&nFyFH}kG>H>SKcGU)Y<6k*0_IvTf*RSHWb#=_I zx#)>~Lk9wGtPnxPS0r*@R512ESe3bFM_KNoprD-x7r-1)iX8c4qxoEz!B`YMPp;>G zl>E9K8qR>~5zQh7d>ddtPUFpkL3VlVi8q$-v8ZBlp{x77b932dg*^_3#B*q6=2eGm zz~5P~#y;%p@6X7{nQ5VqQ6a{oF2)$9<}-Ya2e*AjV3QQ|5?%n z%Y#t3qjZJim8=oQm)B4(M*FHg1vzinz(f~7mA5qXsu%GqT3k+PmBj_)K<)znXbUJK zlCrZedOzn*2|rJ|@Z|?3M5~nxfgd|L*@DBV{<>wM*Q_l1Zihcyvv!uAjDJ+`c}NQh z+~dcnmolJXwQ`NXokzdI1K5T5`8Q5RTwLLn+x=pZU%#FOh8DY?_0y&9*ce?Y5IF=6dS>H4Pdt%MLU2z_1g&*%a}fk*Q#-eKeW-sZ z6YOS@xO<_Svb7jfW4Kb<1yMLyW$@8)Q3Wy^(6JxxuJ>$&Do@*2*j5aIcpaTfzv<&^%&SS*~vgd8|;MKV7B4y5U<96-B{`TxA9RoXH-q{x}jp z6oPgu_~>|$5Xs{1>^LBkGWV5pbh1LGiLgC-6}h9)PBL$%TTz04qii}G~FW0iF@iG&3oj6uXf*$J9(uZiwf zdyqu)G6GmYKo#c~TJ4>BuZj;gf!M?z`*kH z8EY-%dj8}jXcSOCNXHyde9F>+hUklzecna+F_98ch<5wbCZKFAIb&{pq)w)x2zKeQ z@bp+Bj#6l@8X6w331-LK(|-6`F$pyOxuXs_d6}b+odS8J$_5d8?wIU07Sdg6J}EB` z>%6($uPXUo5ogWf?s!_xq_*LRg{p8BR50nmh{Z39H@63_yTMtzV_^?^p5~ZW4MOcE z>kw86Q!9PsAE)H-rW>LL&5i5JmG5(K%q-mq&iI4)&V9=QdI;Tat!~Mzvs~2FmW*^& zRW$IBFFU~SkWpLjC3uJqBUYT@$O4Qir1T%MmdCO5j~cpItEt(Sc~ZZd)l?O`4@(sP zccPZpE|{YL=PsG79W@mr-nOQ~`{m2a z<Vg?&X08%WFV)Ba=}mKFWvAbJIM#S%SQ#{I z_q$26OEsO>#|?f7Ghj3xqMN~U{VmhvXFevst!Ln;u!xI{IMY^|00|8b>tbXQ(vxP( zx_Ws(@YQ8UMsJ2B9@&P~Mzqj^z<|+vc5Okk3^ULQnVSc?5eD*Lj$>S?_XOyNDWCvT zTPWv7JaZ(p@)HJ>uD1I`rR4$CPmTn$NC0=&yy1qV2WMwwcx46v-&qX*JVqHa0W8@K8o4uu_0sU3&O+xg>~zZ5lMa|( z%oQ{wJj^ixy0EYU9h8gCS#cmYe_ci*wn%0yn ziT!IMXibFmjwF^Fhe-BO)=oGWU78k^E%O910!}2DukJvv6^>d8-=t|g_>D=4Fen@4 zG5lWlt?8RImlUmgSJ`dZyoM#Cwv3GYl&@(CcXoWcgPnH#&V#_T2G8YCzXl4fNhnLb 
zCvL7Tnkq$=S|vFwCTmt&@<0H`v16R)F^Jo_S182KyFhgLS3l44ahM$L(2aHyrr1om^z4qk-HEr0kv^c&{yAqVrO-K zJeO2jh5J4Md6lMooGxh6wCHYh{;Pg)|63g^v07ZaynmtbgUV(6K#^w?=$voGm3DI~ zVb!$vpXw4&rcdd&1bt4F0|lX>OMcE9{~3Mx&CRDKChUG)N@yt1pl6G7>M(G=mMA$IC77hf#i7crR(2Xrb!ecqW6 zp1dTd^_#5bTM%1jXXeq~NwD&GaX!l&Qb|;k)zP5=Wi|E5IBHh4O+jMvn;WH(cAF7! zT)48Yc(JNE-Y?B{nDSifgmo~rtEC2UKSjXR26BkyWJJM;Z&uLI*OhQi~j zx8y2%FPedY##uO3tW@mErKwtfuHf%ph~2n}EG~vb0GWFc2``Ed*=WiFNfq;onA57l zvuCUztx1!8Q%B;LDAf@m7)|v(71N0_TKai>v%yD0qk1O*U*W$bn~l~(I#+Sh%$yl z2V?4A=yLH|dLo?J@oqCWTmBNxVig2$))mwDZu^EeeRz1YYLXQqYP!>GY6sTx2emG8 zx4dywND$!(Z@~N^XiE(sWA1$gG=phnV0F%(PgR)&HU9oVMdFix`so=c>r@v5%TvjZg4lfc` zg$$qUY;l#w{!hWeASf6F6h1ujMQx#L(D*^$swy3Tc@fqAKa!I-_VzK84a;#S7XU5i z3YMnBsDSr(DdG((jTQ#KP~k>RDLK2@>ezRVPx#*b*NGkkM%RB!gVOl@USVb!f9|N2 zGBG^6&G-)`4C{}@_QwkeAS3~?Y;OYYs}%GHRH0Ef8aR(W?aR~`H#P#si&;@fgCLpV zbKtdjchA=8JS(J_ENifth2Ewc^i~S`F{!mJt60)Ghbp zBrx|mE7c;4WU9|CC(pOk_fD%C;;D4{*2L6A;8i4;^ZmzWfE02QcLPjDI{{;)vodZF z%qgkwpC|#Mla2sqZuLLX-!u3c@Hl?|>J0~7ez;VEchBPBIa2r+aySUoDux_6Y?4od z9xjL-VV=pr0NdUYvOCjJ161{HPm!tS*#MUn_uo;zBVr>hb6F2^_!&B>p(t=>z6Zv$ z)Mi2ep&u{$xGS-q`~aQ_~sy zikN_9rwW}au|vf(4&sYmb%?pSjt*>84IGNlxHtu~3>lpN*po6X%Zoq~JqU@jt;gJ5 zc_uG^w`r=glN|t$cTq6T6I`H`<_TQ+T&AL>Wk2F%4<6g9k|*k;%>TAOIhjo|ki3pp z9pH#6*oTD!_5hedL0V`g0=IjR6f5f)@zs)C{^UK-ruO;JMLLJyr^c9WbTTXE

    Sj zTm+3>Wxr$iqIhh~d?Nqyr&2Ae!OTIGNWF#-#|V(++AwvSFWy2jnp?N9|8MYHtER%ay+PXb>kJhu!O*Ev}S5IHT5+} z?X#g2e{)%E`5)xFz_6imK=d?`C2?iw1kke3)$pq1oIlBNMD4IEwZ&zVfc2f0khlLv zgoJrAFX;T{ksgBUKsY%#(Ah5*pTCT%suUu<>dbbc=Qxu(F($T3)J?VH1u_phBDgMn zE68};chWW==h)ZAu3z)P>6S&YGlA)9iBM>4Y)8p(a1#8Kw|V09vOozc;};;ocbTpu zuy@owiv#eRj}v;6VYL_bBsxd*k7G`3tmmTR)w7`9`IO~>DCAwnob~ussy^U~s35+wh1)d~Y$* z&2siMBvC)gT}mp?duqx_DqP>gSBM4K0fI^JYRmDNx01KH*2zFUk8Cwdk1tT}q>Lbt zA)s&iBBbB2vZL~%zz0AzyzT=c5+~HblwKYj%ExmS==J-;b2ee)}G1FAl$V~jVi z0AKrMsq@7*NPKCM3+IyzL&kB~8}`h<(Z#;Sda=0fKx8+OJI2Av+Fau?wg}=|($?|B z;dS@`tAFuQQ{jc-6=0Lq+Yt zE5Wfc?JznjGO;BJ|vzQQo~TFIK|t@qA!o`3Zg(DJ4YDUJa`n7p=9 zlvIzrKhG8~pV%=^`GmERlI&RXWr%OT9~1g=cgF)7vCBHIN!;7%gd@w9k@mF8vo7)GlIUD%vO#_z zM=X>=u*Ov!cf_xS0WbrqhAXG4teB$$4p5!+md_+|%{1SQnh2Q0P3Fc(TWg!IjX&@o zjxzR&MBdpcuSgD)>x^otet!pZr)&~|tLSsA^o()0lbO^X|Hq_cfBrE=;78{c%H#H9 zvo%XR1J#MTj@akrx>$aF)x2BY)wEdsnM86pvemi_ZkT_C?neB{C^RvJPD9MRSV+q%Gy z$#Og#j391qcq7?E1DLf7_pe|6**hbVeaID$WrFmG?~%rq zy)-P*s+gPYaC1E}9=At1NhHv`DbT$4EA==m!2~)V5+4433e)d1NR2Pfn+mORB*v!MV9w4eQEcg|(qO2QJ29bsu<$DYSH-ud)Jld7?qBvNl#py4kFd zMY-zf=Nk$ysU3}#o0r;lrfn;UqR|q)wyB23Y^<#H)(+E~CjEVY7_z-x=*qGJlz!vBhoavTy+~wNZ-r97pc66lLZmcF?Ar+It^n9~xd9H0w z(AC4$7-M*cN`l|CfyWXawyGT2p%~Pn7&}~UM@F`Aj5?-NjB+ls0h9QJ1*p;8Ys%Ji zPd+v=%3(M>Krlz0^jwr3xQ6ofm%hGKvoWfxCsxte@p<$R`2RL2(RXhVM?%^%0;Cdo zEA93qBFNte(!rlMMlf}7ol<=z^cn*-}q+IbH%A!+<`P$a`$;K z&xdzy^gT-Hd`@}nWvJ>YHs|+sw{y8@J^%+ESFtcL{TwR}&tiKK2)G1YcMsBCj|!wV zr9fl@`ili~5nh>Ck;dWFk|Wwy*xky_{+ENOZj^%Q_)Na2w2H`#Jv1%%!B>XZQIonE z$Jy?IJs-x3I8GX(NSJ(U!c+w(IVI)L`!Eu}$FuotkaP#+MT42|U}sN{2aFY>kA!^a zya+&q!K4e)WbXup)4^0movwQW(|I~sv*bryHU^yXK#$;vp*DZ=UDh(Y6!wd4X%Y44q&pGzU5>h^U@q!2Lw)qMY!J%LO zD!F?mzz}Lb@S<&YeX$7`Zf2QTDv)_|)%;R(IB+Z2w{_aX&`l3M zU-#Hz?uq-9H|mR%TuAUB!L!_$2yCMdFCEOXqIw$J5e=$Thq~ z2vQS4w;tmm^ZYe`PoHWQX|k8|^7+Xytd3QLM=YcjX9xGBV*(!6Up!tC@o0?)R&8lQ zT=AMN%E}4rRmHDzvUL$L2Dd{>t1e!~yYNBUOqi$cOPmji+l|tHX62c_o@gBBxHWm! 
zX~k#$PKBT@>xOl?ZP#eSx)P6cCJKV^T^MPUI(Dy2cegW&iA8pI6Ak6) z#wS!cim0k`ySVqOUM?P8%V6U<*41q^GVAVm;4LTYBDH0@8g_2TSFI$KRjfWTCK?i&gHOtn12!q`bNjUjH3FLC<&C5F|GF*eQ z`Jd^(7#o6QQ7-KDGx>(~&-nGMM}%G0_e10u-EPC4Cx-gO(snk8_gG)LTH>%pyU-Ate$%^U8>RMwcaWz2(|RBgl_#SV7r=P$Ca^!OvQBz4;g z=jC_BUjLq{HQ}AHe_iN>C7az8ZfatRb8iR62|JPy+W55*OXn@|gW^&qOi7_h+ape- zB>|L(zHMt>o+nz7K;lr7ipr&TU|>Hru?$%917T0q~f}*p!Ae#p4a&4KiuwRf87a_t~<@xi*yj-LJMklle$gc(Y9THiM&onZ~O*oLxfR*1m5%(CQS8E zQM!3lrQ$>G;hZ05(D&q4cCn@qJLlCOVAuE@@Kr?T@WGH^_?5Ep#OFDTx`!l_7t*Fx zIJx&hCS)~#k^6^Zv0+Q{SiC$AcP!sMHmR0;J3*mb#arI^dlJ{q7+sUOqDM({jv1+V z=`+QPR1Vq1E~!^*y*?x5m2!EmWuc;#5kewE&_!B#r4!(^SX6Kl4vtlk>pdpv9w79N zR~#%J@{Bw3iX8QQhto_G`>tjI?0WX2XF*~;A$HAZ9x9>Er$I@SoQHI;JB4wyN?}p# z5UA#T$(f_P-$I<2qdCkF3DG3%fyRV8xFOcOy$GuXp~^~1TmBuW-XNDewj1C{(OLO< zkn|S7hf!vh=5o3R*ow?$#k02KpQN43OUg=!4Q zA>1u1b6r?M8noqap{6Eetk#^@>0N}3;cg5%KA&&%nF_%)Vauy zk*B*yzvQ6$;t0r~IL_+7bAs@NZ$JXP3;&9m-V^iHc@TwABYO?OA_|+1b z@4f?F{gKB&<`3jw#J0dIYct%RD;WlrAXfH-Os#voxV5P+}IV0W|3{O6ul05$Oxc()mg~z4$Qqm&Nu_h7H9W9*w6cXi227{Tw zdu}}|JI-uZ;V+&vX$4X7e!)q74T9(~s<$Nv1o&5H`#QMfB=pN&>8_FYplq;D^N@`x z!w7O|A)DOUNvZKsV{FUmh?n^<;JRXu=0M6x*qE6a%_gIFv2Kcrf!X@VAkcB>A7VhX z8@Rtu(vxcT`_`dF0|S_TPtP#iVPUoC18g=&^ z$m-gL%FAB{Bl$C~Vq+rat-Unx@hkj-0^ANmzhAx>wsx`Y{TRvntvDcNY>oZY_DX{f zA^vUBt1z=9P4`65S|8D$L{XkjNe%p==6^rF`FD61S5ZdBzmwAjHa49vR=!`~A$(F! 
z0=dV^ZaUB^vG-Znc*OS77iP?X7}z=+%$?~8A{~}+xH|`2=ie}hY>G|2ZePLt92#l| z=4|J(f&XjsI(fP{G*|JW4~I;0%f(;)+;J{0G9PS-6IiCX^d*AFq_y|MqlZCa>)Md) zLr113Pny2$a>?I<6PINq2Cnz_NX~>MCgcVEq1>%SotY{{IsK1#-dv*r5;q&kp$!{Ka%B+M?KU7l62=-l)x>ch;?HW45T#sFw~^sl#bq zeV_z#xcDn8MWYXQsllj#)*Gyc;9*oivlmDL651^@$7|)eq*``ktTK>^yF*xEn(l$^ zTl5^z`bKQY%|BmvF63VTD@Akb4XDFCGn<4@Ki?;xJDpR;vNr;k1q9?*x0Du?$|hqL##w;SH20i(f+-( zssk#3&3yhW0NCFly~*7;2B5sH+Ahlg;K$FtvCc2|^IoX*`Hqi|TX%p;6p*OFq^I{- z$Lt&-v;Xn)*7-GHU(2q^9m%7i6NrlU>lyyo6jm4AAiFia@8NDKW$-`*)8opu z_51#kZ0Ft{2UwlJiM~Dd-!29_yTig zCTsNX!;V$+AH$!qcSl<^gmL;WZx0-yxm| z8(=n$Fs2ObvH+h1g{~z;#j5xBy)^4Am>mA%KK=q3A@^+}3K_lonQh>eeAk!@f#?wa z?r*CWrDe%@)1NqwJ(iC)e*}*WUJ4}n^yEDPO`wY0S_6+bCNb?Wq_&12eHG14iTUD+ zh00&A8=|P8QZi8R&3ixt3_uoWoa1bpqoZp^?)rg2e8YMpW-m1Xd(?ITq|sO;;^t4x zKyF_jad^%0HESSDVXjqii|yC9|B%6x}3HZ&suvrfQYrP_blaBG<#ak zYWvQwOi+A~h1o4Ek{fk#-!SJkjVt8}NOGidv1A2M;JiJ$%?>@GZUsK<+SabP8P=Rd zMe|)7iB+~0zKv&VkDc*dlK;RJ45mXys(+IHteyjKuI)fb()mm%lUp-79uMT)9|T*44viP`}R-C1}MeAIY)2 zJ6Zq40WA035gda@d#b^q@$n5mlY@Jo{V{zX$qRt4y!1^U(UVk8d&@3*(OtWkPolyDSOHomAY*$fZ z^-r^iD(ksWyWX0rJk?JqCTKM!itpRtBi8TL>d8XLj+JD7uY^;bWq9kvfbYr2Cz^ad z21S$(**4ah--_5os_l{Q!tKT+KQ&N60N;gN#HI9@XUjUP+{Lw;N6Ie?= z+H|7ypwZ|~=K{f{FY(7;Pna7jE%ro(UHMiAF#92Iio<7TZ&JlleQGft>pl4V5)$zx zl%acJ+yUMwu{Yt@GX-1+B-_u@p0sV0*9(qEENt!y2}N4u(MzA6ybeH|-Y_>WQ+-iz zNm)-)ZBR`iVT%d}naE?DJ9C0%ww)lYk58CECL0r&g zG+@FST__Ke5xyxn*H*c;6UEK*qsO+Pd3gLqG%F8KjB?x(Yx+;)&UddJP|cuQ;q!nTcgRrkD(Md^ zIS(=UchZM134I_$HtRS&u#C5Pvm)rggb1BQI;OHKDU-aJ4W%WN26j9>EInD7-|uyC-L{ zIP?-{#E>J!rfv@~Hlzl^SRn|S1A==33!3y(x{yoxnsL7|&`yk!cYKMwU(=%~1@%jw zO_&r*3@_vofiU`8(`etzhx}t1w6S>)@un{r)Ozz;S1$>D`AW!m({F3$Gt<$0D%9`s z-EmL!8VIJdJEf;%rt;%;YtI65Uf*ONf_X4cQB{`=NlUw==YHUJQgNsX=@#3Q1*-?Q zsAv@d6+ealX#Lq>rt-4@0zR&Fa1do7yzK!E1rUbfnjoF$FS_pIx`Q)xfLT(M^A?J5 zcuGe62D&LS0mZW33yp684#cB+MYp`oFjZ|)iMkWUIKmBKb)ROEDTGe+09dPsp#p$= z>n|`L1S2cZYNU17;z#k$3&1wuQjFkI-QAt*wz?boYm*)JPZcO~1n3;T^l{tLC%+e2 zYKkX-PuCJEonBe|Fnyr&asOm=jQeQ-Uw2AoW|&$cjG$INPrS#fY`o1PPoZgXZ*w_? 
zJY%Ff&vDB|Ik~tvps$aY6EO$PcztvzB$WSJUf{oVSRk#wr2cvjK_Z0$7f<$-&rp?#G;fdw^ZKY<)(rr<`?gIgHFm3;Rt%=%z2aAD;k5z7ap#rzaa+}`{_T}hk;L5>v#1zlFv3!&A5>th*`MjPT8WxT2-X9<%kDRs@ z26FWt#m+LziO$v=$sRhpKa-}umE_#Y8;*7FLT_<?@bRQQGrxsRaxN%@RGFXxH5e zks0$u`%sLNOv4g980m40;)rkrUaKe> z_pQDG4z4b<4!?n2!`V;W@q44)u{N~ zIPPUNW|_f(fd}_UjYhYcP7BwlZEBjqpCPmdaR35=ascy6V`cGiV^qbqDWA*a>M=~| zyot=A`O_7MWdDG80=MK9m?}q3mqOwuXL`K-B{w6hy1FTtL{g?4@VgIfA%3b9l@}2OT!pKYP9U#K;#B{Kv z-fKiH>phoOf#2+mGXNd?P_LsRzwRrlrJ6HX$jgH6GH)-*p6{&(MFl0AZn%cX|Q5Z!7vW@={xe zq!$*AvQSX)?Oe6wEh-A$uj*{gr@G7K54IzZ<7$&|^sabMTHRUjm06EGIT6$yBk{`_ zO2xn>-$(!c&K56=Z+0Bb+9!%AFhm8@($|||c$Jmv=T|sH6=xr^F}J|uyc!7ZYzkg> zK}$h4G3SBxGA~2J8!ZIKmrk*(SsBALL}Ju}>M02>j;j^+f>eRmgInxIXrmOSoe`rI z7SQ011W*=9iDOzi6ZqbP$C4?S#OzYba+B81k2xUkl(F$0xZ6%$;7>BR4oUBi7WGCu zBlarZ34w|C6?CG(SaC)MuFwMu?Q8TofMFDLC<;6f?$g0nm5+Kr**g#iQHDIy z!E0Z4!sFxL1MVPt^WG~w@9*F5zN>p)wx$qdnVNc`wY!^%^lB#G#iG$iZU+yXKBNf8 z6K!o@vr9XeyUY_bu0sqAf#w!z_l;aFwAo5ZP{bK0cEWKAu(dpXLBWk$X%{xbKZoWq z5ul?HdjkaKfd-TuL?TK6W?zwvSE2(F4qHBUt@6O8Ey$cyxZetvJlQ}Vzk(s@6Q^Vo z!z3LYi!s|LpMxXpIu!p8Z|@z}RNK6ZV!?ud1rP)Q3(^rmdR39$Yv?Gw_YMhGcA%d0pmJhwYou=FNa-Pqj*qiy*_w|4c#j#dwOu#`f{XcjvQQ8KiuBd2nOp z{`|IpRXp7J(6#;Ch3Fq+V_aMujCaW^&VsgW@T3Nr6$X z`0hZ@@e%<|!;Oxv5MnmKi1p!)JFbfxvT{ao0Uged+$?2l_t^U`jg!I`7R^A^!Iv0X zU$47Pynly^*l}f%GViy7^!anr8{h^KfOMYOBIDP&YrxskC%G8b95zk!q4!o2>o0Ca zKDbhc`NWkZ=*s|jj8;=VuDxXwE6d54gyG{Z->okSZMdBabbdS6b@lY9xQ{GBzj&~6 z{~eMT)i(lx#w;bosC~dJ_|g$pe_I`z!6wO;iDL?(2I9k8Dr* z_~aCZJ)SPR9^$ufm0)3E;gQoHkhUsmLlUQgA!FiqJr`<-9R2621elYiDu zk+Txe6GY`UcCOdpVmFhW7{%b>yf&vDmuG~y_Hh$~Ia`IR_DiveK<5HE{R(KmTEEGu z5bROI5fL25#$R>Ztu$Djz$LslI^QUZ0FMhHWi!19dNf{j=fip z2UAmz1{T@11>%~a=4zc z7e*OoWf+s-o5|7$V-ssEcdXN81>j~1e005kjG zchSTj?(MJden9nX=)Asfq846bo4<5$T3n{XZ7s?-PYxtN+iT&ySo`N(g#8I$n;h zw3JWK;u+V_P!g2azf*9A!-e<)VQwLzagf9Z%SA<7yIWNv%>4U@bb&R}UqK}x;59Vu zGC&3vh=M?tT~bmK_|qFigjIibn!n#{v%eu5K`Pgpf`77@FMQKSczk7fOKlt;f`_Pi zcO?$3)HRy|0lHGO`M2IS&lhZ#tIj%Ft-OL7UU~m!XvjVnh>L)5%)N$f{%F2g7Bn0X 
z9IFFLv)iP_|91V?BTDl}HqSJI57hUQ^&%|SxukeX$!`6?zBDE_)~QvwbZGofE%cxN zJHGny>bGZZ^hW=7oQSua5(a-4#t_AIwHEWwbmZqINkJe?>NBvnps&tm4HR5P&CG$` zg{#zG!_c%MCm40x(Xxm8I3VR}1@vx|*aNI5CY(&W(e(8nsoHZ=EAzhz&D*RXqefO& zSEuiTrK0n5zBhvV3AybtS>ruD$D3HUxe)+bOp%Dg(KpQdQoaBUTu9fVT-8`(c2Mpq zh5>Bb`7VFD%ao=4tqaZS`CCDDF#Zn(*#xoth5P>D#+`|!bNk-;dD<8323=(UT) z^Y>iPwgv)XYJrcK#t8-bTUW z!%qcbxHOji+9sECJ;iz;fMZ^mBD3gXEVvH?EfoXNhe34_iZPp@VepY56E)%49u0eR zA&BfeMTka8^S5E@`!kn<`ML%d|BSEpjV8{I~3p!CEZFKKVi#CT5* zmHYeOd>gi(K0GwvcHnmYSx?*T)s|!*Sll}l@ks*<^~A1Eb5~82q9*t|S)eD%iG5gd z|H6FO{trvX!;kbi&V!YJ`zskXZPu_K_v-&iyivu`0hj)p>y}Lfr1Ly*scA+tIFL0m z8T#&`{mJ$24@=Y3YWIM1ub$lb;NA0!==0XE2FQng*|2Qu*jIqIq?Dqt)fnWEwqET^ zx@(aN3JM-!#U4m;e*azua`Tq9%Swm(krsxIv`P88`n4O)a0k` z*2L>~?9F&|Rn!beuKGb=hinlxPrGI)?r9I6IVl^jaU?y*1%`4?}?8&1=1 zfpS-R39`%cVd;QN1YC==ONFJwj4__a+dW;!DKIEBoNnT!L5pTD@6r6B@CGo!W2^f& zh%b-^Zngn2V|{fsqhQ(C`su+8DKp1lO8zfb`TaWcjleR~m3vZQRCs-1_hadDK<9~8 zlMqHpG69I{-Lvjac4^O7Q}+?oxBd4&LB@db+Fs%O-wO<}3L&7RpzvFV?}M3=f6CFr z9_>q}LD%!=g>9 zbOWD9LMDI`#TZD`nzfS3wL|><4K5bZ1K9LKd+!xY%Awq>7Ss#B0~up|_@yX8xfG^U z{Rt0Z8Vki1>GNMZtk(VaglU4G=DwO9t4|;`5Xs~z`Q(mUswaz*3LNaoVG+ZGg^5CpWynVXlSxA)r#$6X6@Kl7+^Po$+yXJn77 z`4@Rss~~Mseappr-GA7v@+1<1zRD~@lG_3fuh)p+@z4O8?(*ls->XP~qJp-WcFoq{ z+>@uhDmxXx*u;nA?B=BjOodNwxhGzg#@bHRT_UKd_zOqK1WK&{S!6baS8<*~u;J~UFE?|h$wgF}CNu+Y-UAB$jfywaDwZ7}QiV(0f zUcr*wMuz2R=AYeql`+T_AL2zeYwp0?Jay>Hx$R4nD>?$mR>A~z!r7mV^IN7dpgET| zyMl`}1;JR!Q*u+lj+Q#$*unXF*dMR)&5YLpUaVU0%Cg>zZUJn1EI6ClE>*Sod>~bf zTSX*1PSG7r?MXaU?Q&W5>qo`d(#cevbv6QKHnDY36jWixF*jw>gh_-8`q(_xL?mvx zGnCGX;%XhBi*Pm$j{8i)$Jvl3uc+bQz~}pQOWd2Q%$zjez*HM}Gv6&P!!Mx*X{Cq?AE}07ldcTWZC|p7gC)jAwpM>VF%zP>1B(*dXpEK;bz>-rE!qC zp01-LC@V4Ehui+C*a*nY$&nf^HEJ*MM|0XCL5sb$z<|ROAgXsyJ^LxgT-amUZ51|g zx{)%0IQ|XwoVB?Qm%ajtg95Nt(uTLe4;$CyH!s@O2!p9sY#l)VLr%nfYLvA2r?*uA z69|fr#W7qJOWi8J+XMgg7bc*)sp&I-BV@qmu(B`hq#6QrG$v*j6ckhk4#P4o$}!Td zvw5m_Nc=~;-_jB|bNo=<&@Z66S_=e9rGHQPIKH~?eWtd&{B^#AavM-!v~0`PnEO0OZ>zaKkF{%oGYmw>~ 
zL_#xs25c$c0bct8VC!8#G!~ZBg;XDCJ$Uu6==rzuGwr*>!|@*DSm0cU6LTo13zz72zAo*Wo62Q-UcBKaYfx~|%JTB;)ZsKhUz=wf zH$|_$tP%EHY-J&s6yNYz8&z#rmGF1QjwZ6O(aV5bGG`M;bm9d(jA&tx;(L~wsvlqdVd=doXi+gS z5KDGb-hXH3iq}d<*Zpf-xxAdoN|aiK>wqahRU58ZbzQD#HO_17?p0JKzIDq7R$^vu zc0Ig(Ffr~SrPf45V%$v^Zq9X5Y4pxO^Py+F$HG#!*=D0_4il#$TJg^$`g?6r=??{t z#7@?thUPm9LW_9j5`PhmZA6bHgAQ(C)Nq}$h2m&Ypu#7(nJMaebx&Rf=P7_M1CbhVt4Z=nB87bH5B^*dQSFU>t?)?GARNQk8Y*UIEt$l^_?-5H2E3V6vyMY=YWflJ5V;WNt#Q zF_V+dISNGi>8U(-aZwMym(9r+SL?;^RML9C_>fNw3%V z(ByYBE&(!z#jA#CYV0p8{ir#>9KELaXvS@x+c@L*L73=Lj2emNEV}3-TMay zKF02EAMYhcI9`JH#l=zq*124pdmt(9q3ywf?YZe{zB{yDE1y@K=Q{RP>a3tOI#)?` z4b8RHA(w&unrl_>-1y$vdsmn#&XZkk5eDO@4QZ?#Tl1&ljPe?jWv7;PF+^VqcxELd zPbOGLcO&pTUn0jk{2zVB0hjZ(_kTn1T18Oy-}SD0i-d*6di?4}M(v|U)8Df?ILSg8 zH_u>>dHR839Ox1r9gNd})>Ip?II>Vlok~wJ@dk_>osaQZ?8!dTr$y=!EYbtK7HuNh?uQlYkVBwW=E_=rCQ{4gMyP?x;&Tu4|{oJqDc>SXLfH%K%mD4@-7C z!MV=vZ^p6?hOm?;F&IMkb-DN5r28MhzVPuhmI{>;4f~}~oUg3z)_`~RSt+^rWa%^&Mc_Lv$snb2nM zB%}<}a{gBs^4H$Iiu&2_<~70srvc*T!%4SLf5?{KluLv}5`HWj8GP3urx8bLXIBgw zBL62>_}3tLzjWCj;MM%E^V5Ef)B2kmqU!vABcK!Ahy6Et>pzP^WBw+}*#A}!etRSp zCXndThXu2jJeb7%D6fhhF~=D?6FM}B*0mpiI4f{+QXfx^d2M>gPI?%^<;iQpS?pL+r*b0FTQQ5DtPVkf^;|^W~xw zu)NlO6LogFmZ10A_l%bX-bRQD*14}vyaH`o-H$zT=pcx`H}AMgJ5XEeGNRM^Z~_p& zcH<^j8!FPVk5P$07;*8LozG8O14YP-+^2qz-PU>e5Fo@4Z#!7K=z8Qq%?E<}a^wli z2LB!vb>IWI_mBy9KH!2-LSP4&?2Z!?O;7f#xH{{C3i!Tjs`d32Z$C2rivJ>ob1hE0A0={OZuB;)BH`dP2sRov1uJ;T8h(g(=KC><&+vKtU1BF!2>!KTua$l|L(=@28T$GqrV-3rl7{rpsH)Km{HJ<^*e{ zlwuiGM%Rdq!Mw3mSODdVnR#U&&#yF@0S*t88Qs2dV)IYYY98nr0Y)5;J=vjyU%3e@ zo2mYyLJ()uN5S+a6q~=eP+y`n*0EEgEN1Wsuzd;rzESLo_a;-&d$?cMRtGEtS9=| zEB*at!79u$UdJy$OrB_Mq+UVtYJA*oZ*_#e5Fy79**Ko_8$}JzTQL2ubaLrK%C#GF zQB#vnYF-EFUrToHC*qkkrfoe*gAo>^N9Gna*UR;oyAQXFa_hkqr_+bU7B)6JL~kxW z6pmF00JRy-by=}})m|NaUC2+5otd}E0eN-mAjxq>OkSzntH(>)3G*cU?mWiP;J%+; zBS}Ngmo10D<v!Lh+(P;n7dP&NlMsX)X>m~|P0xIA8m>8ioN7h!Jd>+g zm?4!*oSDZo38xa` zr<@qg-x{L!2835fE1P06zTPW2e#w$zTX=Uq>EI~^eX;EHh4~yCf37@saJF6p{noYj 
zG){GsNp8+!?ZoWZ)3Jul&&&l?;q!3Qs3|b3=h@kGvV>O#s!!PediBOwVkL;^p5!%h zntL*u zB9oxKg1c+owPu1fYas(3@FgfxxY9c9WaE-cmouG6Y+5N2_dwR#9h#S7J@K8R>sH*y zlEYlPSO(sFnxg4&BV_DicV{mGOieC$M(C$`W!q(yO2{2$o}FdZX}vvAOS#!t1eev1 zJfsS)rj>gr=F4YMm|;9XPOYoSRsQ3cF8|fn2!aa);7ebVG(cjN@7-z@WM%+&?u49% z#-#%aDM1_b9e5kA4wtId_vA1MxgmBax{@a7Ef3Rv{xsPi85WO;NzUe-V?#8ps(bql zcp{2a)jg_gv&+e*V;R;b;E?U@ZPf6hRgcjl>EcbhLV{BQyJ<`SL%!1bBSJ#Wf%KLD z)AEmqAPQz6&cSuyG!LYpwznhDl=yJM{rCRX*xjuwfjL@l=o=ZBMYs|p0-V;S|V z$9j{WlKj@hGwq~zQ>wg9?RZ_GO;2jrElgbt9-o;+EupMuCPm3&b;jz@cQJVkD@*=g zP{&UUYS@uGJ?+VtXBl6qF zt(`6PuED!c5014JP-D#z6R|F+@CNYjxpda-SFaxWUf3$N3AdanZ=4_(AFgk<*4p>z za<-qLL@E%>%oIc<%%D=BJbYrB@(`rxbo#L(0s1PYJQg0?0F7-f=>@%+fTdkeo;|gaUOhr{>R!}qJ{@QG_$IS#DmqN7x zZnFbE19xc2blgbpCdU_aYLjPphH+=xedkG%%P;UB>SxUw<3(=K{*9;5gVwBPhk8}s zbp?>ZbXhX-sE!5v$5#1bP6T-%yxAyyk#uduNbfY7K{5Ft<^o(m;c-rndIE>ypoNm^ z^KYcGUXV4Vj{hlJ*S*1DmH%DCq{qSJtprgIsn-)yp|Hm@K8Za!hfJXj z3+4Cjhg*%>yd|2fKK*P+LN^fC_UkBAK}?$XqY%^Bru>|o$iz#Py^_~>rWTIa$m}Q;6*^ht0t3t;6_YIWY*JUJsykIq>wKCuVzomP+Zc+Pd&yoql-hdP}sqJmRAVBdUX`ex>1f=4P#5>hw8sDyqC` zs861Gj}Wz}^P79&^y=WV_VV_&{+4J`38qkmQe*wcD`8JEr9#wGDZM+*D`X>$O-zOy zW*_&?gR36A^p7)sK0SANwlK|oU5J`z)(nxa4vCM~L7U9$Vl7anX6EMBFHY|g5f^1e zIE1Tfna4IKKQ>^4aHzLj2py{FdxE%JSU5UrsfgW#pnXoXWYq!=t`>S##L6itsey*z zW;;AqJCIas2(ZM+nwlE#15c^LNw-v1jV;K+cBZ#Qw_}yx>|NgaoWSX1fpMK`81y0= z6ThB%4$MonZensoNgk$`WfCFHAPdMJ&G3yTA_W4U_{JgB#xS+$0mp*}4_H{N0>JRj zV!q-DmV3ql)cn>NoiS`Sy^ZnC0&BXJM}FQ55$%XZ9r7%;6nhTr+)hafbZG_Y*2Al~ z*QO^yK~JB1VZT&WNhu8U$1VXj)zMe_W?(?gjN5A^rC}2xY!X1Jy)jO1v5U5E={l) z|zOvdMx!T0y=kO}SaHaTru^h8bacoHc{=*NUM$n~HLb`NoXH(i+r>?xd{obl z8ds%YL-t%waPCC}LhDDJA`-kuCQH^j;v)EFdqE zrB`I;$9^;2QIm;b%{C01?TnFcBBprzMy`qYuI97sU8TIZ3&98!#{LJVJlx3NUrT-5 zi>sXHNOLRBh|mw69p|&x^?@}#d#B|cX8V&`{>a-@zgHN{Ik`F`^mpTZ76kVZ8eJ#d z==zqsRsvo$OP@xRJ<0jokS7U&5#M1hiv+s0QmGN1*tiaMFd~FWnGU{yi(!MP&P$D; zW}P}Ax{<-7?7()MGA`x3*W8FJ6j}nc6SH!2Xl!;4uWFj_g@4^SAwR{ zQxgTnTE#eQnrG8+3w>YB64Cn-kqh(l^S(Kysi~>EMAIQ5#K$R$ 
z7lgx+$lR=j=duIw?>z0s8=pqHa--fc`t9e7d7eIo?yQ`2cJ|gU@3P-41j|*8eVy!@ zimFzj=2QC*uJ3&E)Ut!Pxz~=HEtqGl5e3DsUOgFGzol_uVZ};KTiby3tdf|R7U4xm zY1@P1P&LFG~wdBvq?`wnF+5s%?_jwVcW3)LsYVXioMvH_d%*+Ns7aK&c3(PXB9-g z0i?&?p~S)57usPWDMhELtu1O{45lffh1}4lulrU)!^AimL?vFn+^_OER6^n+EJ|Tw zFif8HP#%keg9GrQa*1^}{bXK_LQW(c`Vtebkt~+%5xiR_q`82!VAlva?ML$-jLI%{^8=z*bs(rv50=|-4N zc}A-BaGpj1cEEUB>n&PSzSt-yXFMh<=F5&qLn>&ANxuqLcdaZupvKWP_v zrx^tJPDDlC&?we*Lq2{rQeseClI*jL_pAOSs|#~0>0oiFx^`P3?UJwTo@~kwA?sl1 z+04sAE{ujGD5K1IrhC9yg2tcy^(1rN>xi1l)6dP%!fTuno8HwUySg>7&;dI>(I5GR z!2c{w;+`~Iq@biPgjeJ8i`LkSFB|1=7^DuIM70%~hbGh)nz4l@Z&fo`&ET4X!owjo z50!1e{4#5(gC610Ma$hKaRyobnIVl17TF*%sNCJVntRV?o{rbAD_eZ370J_NdoJ%s z17S_YqG<+sEjFxW2l~XDIE-c@;Z3VG6_C}Qxxojr^srHBjs$+zZEj2xwoFJ&L@RQy zyP|kcBfLSrkr)VW3TTJ*B8RGbdeE2(O1W%hk^Yy_fEU%wRf+*lxU~7esOQAKm|-PO zGSjp?8dGuGqFX=xx4jJx}oP0ww0 zKZ0RuX2Tr1<20kTLbkp^pD*eYUTCjvnZMr!@mEBArRS52gc*qFPES7&cT3>IdW2H* z>P7CYJb&F1w_=lp)37kzls%^6^QdcsYvEBy5IM5TU_TD{r z)Z)u?`674=dlD~s_K5Y!z1NlG)?UFz{VpAF50xg!nfjGBz2pfbXu(XGLZ3%@kr+|A za{2?PdwHGr5>dPQek5z+8iImj093=lkd2BibYuJs4x+s8sYKs=TIcJFab@4hYYXw| zs)M$6nt7*9y+yAD%f6?xK%f@!fSgegqreyyH~g-&|IPc|q>0VOT{>EUG9y1bta>Dm z+c<<8^Yc;9M?Ph%Y97T@LCNdag&AbW#>NsJIYs*u!xWS0HjDDqRjCg*n320Y5)y^h z{h#9~z&O^0Xj8kfPdXqv$lk@jMdnCvx79-R+!FLwrpM__dx3skP<4vICjL1B6|D$@ zhm1`p2ae5RzDH^PjRWE{2V*W40!9u;CdGW1a=w`Zi2%~&;+g;o&$S0|jsXCC7f))W zg_9fZbyn@xM7%O7E zn;y3>SPBuzn`RvKQFEw}oVxRZ;Cme+fwKyQ*_Du^&$Wtq;eD3&0*S@Jk zRC0MsFt)-TUR-DBGymWLBV&O>DhNl{t)N`TCQ!`E$jq^1dSfIg%2lBJS;Y&!#%=$hLPJY zix}fC4||Nf@YKPrWRx$q1!baJ-XYp`%Y||37+s^t;cnTXb; z_iLRt{H0H&ceryO{ga$*3qhmPCUL1IY3vdpHUu4lTu~tgP3@SCYAzlw#9i4>W~K#) zc@N_D`8C}au@@~E>DbDh>BQo{{ahuAAmDohAngoLKyq>YpypjtWW|SJuk?hXu9X^G z7j_#-F2(~9-llzb&|79@_6wG9<8(Arxu7e`%i|@tmsf{zwPkM+ebPkg%9JMPU`>cy zFF>$GfBeWSJH9hwF1ow7D6T$f%FV)%wWC=WKyVsVpk1C}YeAnpzwvXb);5Ox4kxDr z;RVfsnG=)A>ZI6L*WO=PXqqNNwozD4mx|Y@KhGOVIyQ?WgQ*Y8}9Z&jfg%^)cqw-$4l zlHvs!YX2t1#>A9J>Rz`mMS*`C1agW01`xzRoT0+<>lytsf5?WKGC<+HN9046*HeFC 
z^e1z*ggp4yeCGJpCe>wMV}teI>cS-g_qx14Cjp2PSPOOWOj+A8?(Nm~#ffHsB=`Nc zb&x09n_*eke;wV74)W*z`8lCh^4sa4YI8Oa!PLzwzn(665vus-0;3S9%wre7VGe#? z#9G~8{{%}ltCp2Pp&bJPR-B4R4YY%IciBw7n;cSA{9qPkHy1k|_l}S;J&YdH-7%Aq6&m5R=+)1sV5^Dqt?3p#6cQ217(zc^*w&UE*d}uV zQJ|0LNuY?_T&@d~@;70la5_;v90gRak+>reiC)~h)m3GCJbZ&wLdm^kcRzu*L9SRi zN#Ht@vT}t|5_^-A?@UTm?}+j~x_0PfF;8_S)*GI5(6g!g$yFX;9AUNqS&m=#)=`)) z(z$Rr&*bB&%6pW=k7j{lgFHg_Th?O+syRzIq zi7|Fc7Ru8kMFqqNg7l7|g#U6DcC9u*2umW#5;^?dT3$;kbIW zBO=}lA_oP_LIyzLf_-uas4E$lZv$x=@T;Ag)s`yE}TaL;9=4w5+XiJHHFE6<= zF|S^9a!j^uTNZas&RpZuG0iV2hw~T>`3@tV`G?3QW8;jNgaqgP)p{!DWc@QC+;{nz z^uSK8alRyor+|R+;POwBWiomYH?+9(|Mxo^a^J_p!NV6n0IvKiQQQZM{nm6_O4%PK9STa4By4JDbn!ZrXqOW z(Rl66F%2y>pJ+C?e}oy~5<%=Q`}$;ci-<_zatUfS5-9x)_4W1TH9n$36zVD8 z3=Rj3%20@odX?M6O{Cb<*D)X%qIA<6H1F)_K-BLS?zb+4gX`lq-Lk32p=m&zP6FMU z^90CEa~8%)N>xA#DAU5ys{?=9y9h(Y$_+Y8!C%oWhJEv zQ6=B!>lc-nSV*Vr0T3g1s46M)MsssBb^qoAJiVk~XgFSOlGuzn%$1XoN#p7q)z7F! zKM$p@;wd}51_;aY8#l;!T!#Is+^4LmzY;iQ*0x1v90|;_AY5|3w#3~hQ>!78c)M9{^S@}(N5dwxU_YLTl;F1Ppi2so=?y75&3>HQp z>u^LR$l1nTTYAh9p1|GC!uD2z^hq-R2Tu5`)Kj)F+aNjnAB6)(#;riMa^6KRTTv!xt9rTIQnoAOznW0x&hth=hsjW)>PiO(VJ)78br;r zMd7x-6Ajc62@Y^T7q9fEV|iZDbbc>eshNSPzEJrvJ~Oi&d17EsQ(^$uJw35%9vB$V zsE|?w{Gd|%y>ROZuXBp<*L|!Ytq~a=l`L_;Hh#2L8zdbhJZ2E6$P+**6iC-txE_5^ ze-;kf|L1Q~cK_I(kLc{~R2$T@AN*_11hHE85cFz~2?z*~Y8F+J4yXIk54phHVh`z| z=CwM{@3OpgrdYp9;q8TH=MHFz)u^`H>M$6EXrA7V$F-$6Y+Z&)#^U;7Hb~0+=#rQj zvT-1eW$20LSD%5Nu6vOU(pe8Ict^q)JsOA>l61*>X}P%n#&6yJ=*=EPKfy8RW=C$C?>MorZOSPvIv{!>Rl^G|4b{d;JrchHMa*1wIG0j0Eg;|>U8}S)2v7y*mW5&!1`t8HIrIcIP(GuO?lqcw=yfLf-29xt<3ogiTtB zK}!8v-E&00_4po=9{8_Za9exp=~%{QLc&;9vx3yMd_g!A3H0M#bq)P3gKU z^{S|;MFKX*5|jg457ryh-AW8@fD#pMeiArsa#UQf#;sZ_y1qQ&u^)O42=ZZ&B{8cQ ztF+ucjZTQo0J%K5Nc@37a^VM>#0IFF3RWKHqH<^)RApoqxf|LVw}ia~ZMt`npn+;U zq|w|>b7pZ)M5%A1&$6wEWxUes?F}+AH;@>YlumuPs%pup^1*ShiQV%DmbU`Y=!=uVn zb-bN+wIa*J5>bGt1f(R`!|~jDwcV+FmCXoC85B79-xggjM6VKk;572fqJx(_EL#f} zg>HI12ZW0YvS)ynV?*mb5st7ryl}HpI83%R@cbjjoR-lEt> 
z^FIANok2=wekKV1(rciY(W^X*t2+4ozkpgdS?-|;-Yh`>N}XWX@pgtCh~C1*hjnli ztP3ZHhm5{OUKM^rx%ANN(8OUj?wyYe`Kp;VZVTV(-s zTY04PVjnl3OTC1Coe_Uhk;B=KsS_{kbHVcZeMveS?la}a&st{u0XJG3Cdb#WpJ5)V z>sIb}`PGqDm{HmKZU4kl&mL4kbEsIaw~tSuVbcX7BI1D%hvvcVm6^hsD{pt8;(g*N zg;ZAGfFXSZzPSn{VF%!1sRj5unOD<}l=r7_`&T<_RiWe>_={^#u6Iy2veg)2;88u$ zso7Nmpnx&-rUh|K7QLTg-f;0m2apPQS5eom`0G`=t)}9QhDt}PJ?m#kdP(m^@@!}) z>Z3*y*WRQz`gT-DmaYXnlM-)Ot~$4)g5KyoX>m-L9#_EO&ghagtA*0=)>-;IJxrT9 zIJ`;XRR>ZEMcECT*6WR-F$`|2JJG{M2Kj?OR5k2xEf$bm@xIvsR`=d&WU8xO)GFB9 z+T@g#J?u|L;!;ulbLVyr4u;9XJrK3!o2#u~`jCm;pP?X*v3vr*o4{Q)ixgyFNKtK< zp3vXh+*G=qNd$vgkL{%R_B|xJO!@;Xf`I^fc%cVVm{vpeI`2+#Icfq) zN`$n~N(|&8MQu}>SVTfXMx1|h+UL(ts_X_0rNeLfP7L!EZ;sUO&vky6$;}|)iMwRf zfXc8qPr(-_;JlDF@OXgN1??+4kkZsp1*ZJ5)+hCHDubkjNo!yW#1 zW)1-@s!L1oI9)P>zx^4IY>y4>&Ma|(LK-oxnTma@9!RQ}TLNS1GkdPp4ZLZ+|2q6J z*bl37InV+$Om~ysjiui0_3`!^4epgbM{f4V$Syz%fK=w{6in|52{aoH;B3?59RoL5 zMuT}@veOSdlDyy6pV8L%VYU+_G=xK`cm0);g9bk1I9Cvm>48LtCSOSxlg!rsj1&pS zD#J!*|N6foq(T5TR6Y!k^_VOM8A8dmX|L9G^_!~|wSDg$#y-im7}Axz-`%x8Zd{I| z-yiY1Ji9y5t{K7}3}9lr3IusBXh6&i?7$mTXk=_CY7Bd;Si4SRd)o;$aLj`As7X6; zqLt_x{-~j$e}-6sE3pSPVUdQz?jt494PYBgmi=oaEZNTnSz?E0&}F@@Ho-Gn4U)1r zT1qNO?D!IY{xqg&c8_7d8BFI>S>ZB);_Y(79C6W#`Kr+Sp_|X1Wi9x@8=Aky{S)VY zm6dU+5^Pe2S#Wt-*@65)Ro$Z&5jWp0RbDISCWel=veoYq5_ppb8ZG<{zjbTHdU9Ep zwIO7AFc*Fkx0S3$*2j)?~QoMN=D!8 zpYta9wrw|b*oWCrF1HR`kI^Z@Tr0@L!z<_z;>5LEUQ7xg0PD3o+*S9kFV-JI{%bU1 zW@`HV@C{iM3 z{gEkc%dhEWbzEGPmaCEwu1G3xC9v&VE*8ufh)Fk<9kAH!>r8SD8JAmk3vXH>P-dv1 zy0uUqkeyc8 zT_!%BwAH{^bX~|lwwNnF56Fo8)w1O@HH&o>=i6Z)N4_*E>>TjffbEW2)E>Y_A;(+N z+bW0xuR{cUW1I$`L^;JVZB~I_P>Z`1^G14XbUePB2~fv7~ajj*s6H8Vna(q*8=eJKV)-fLyle3s8!V;9Ao zT8itB+j4+m>)-`5I-4QB5RfnF=M46142f?J!1!VpHaAl@A%TQgG}WTR!@IIC-M;c1 z7xqObB{9pQ-8$|m;*1L#6%0kBo0AR#fpmt~c%iWOkgN@w$KmIs=hzn6SpLm=pT#*f zI7FnNtjrsaRT%PrC$qC|lLlmDDg#mnw9tUAiwQF#iuH^lBBZ-|M)fD#*}zuTf~m=~ z$(U~sFOUjN0N&SKY=V!yD22~nH?>A|C(pii?j{m<_ioH0fR^9q+dCJj)gwP~S1hc| zslHQ77EUQd^PvTN81EW(bOhFiV8R*dLV!sS(~7z;XS~zhSk!VnvwO$b^2cy+i7^8g 
zkMd%trYqKsAGPZT^(4#|8WA%q77G^kzW^c9dh$dPi|Gn?7PyVafIyQ~k1QER+v92^ z!~V90Q$Mj;{gdbpuKbTCmiG1~OHh9K-R8s?O7)Tk)s>YMA*c{4$kcn@T0 z>Af|CJvLPIeT%F|3W|ZI$ErQdE;0+uSGJmOb2+Eh9zdra#W-R$AUzL)!=EcFES!LC z^NL@Xo5KT#q$GVaGc!Xi8nGYwiS8R1y_2hJAbnmsK0RWXSc6HgtmK(;#P-yey6Hi; zOT!*OXFy}>rn1&&^wWwFwHSYeP>g?mYD&D|N$19|8$BQgl?L?2+0_q{`Q$+>O zaaXt*pI*{Q^8xA-UgUI&-2)B^#0XOj2Ll*F7kpd*_<(%pMb_~Ore}0?bk?VzCGd%O zpF=5`$YF;|kkZ_g5j;R>3tdH2+xh6QYpqY!B>Fq%UJj3|5fK$#aH{tE*aHU3nz1l3 zNx${qi^vV=sN1Z}uh{>=L{`OSEd3gqu}FK!%p#}?42t5~wKQbPN^TUBGU(-cT1qle z?e(Nki<9$b$Mxv(Y(LN{3U!s=7@W*e~*nirB0C(lUYIFcR$3 zE`dK&jO5hjr4-4@$nMM5pW-fpX~A}(iH{=vnKPXvBD=~LzSgRaYJ=I%L6b;yQ&C^wkeS*Zku?*?shQg@WYaf2wR5ne)_#$Gu&sj0om1o=DDUuO zCz~2Qu!@ErI|#W6$xYRZ6@#Q%5+v#0k={QeiWBL?6T1El(FSr_XnjB06(P2vo%-OG zM5akb%*T&;0YV}LV52iUbB4mL&!E_CVLAOQegk&>6-VS1?w*mfqW7SUlH>MApFlLK z3K01418?Pi;Ujd3ApIst z=R03qz{~aWzRz-R&vQTb?|$B~_fXUpDj}09!W0u6H7;%4x^)TKqelj|#x@VGsMHl! zl4NC%1LCD};J}@G{)d3Qgkzf?2pagS(d^`F0OU@NJNTsCr!nxxpsJ3N6QDv6D=Sj! z@f*2wjEtl=S0J|*f3n1DKnfZi&bL+Xxd6Z@0f&0WRc}22;^7{*U&|Fpa|6mXtm4?v z)C_TK5m;J!<(LEToL>k4sD-l(=?7F*ZGlDrF8PPDk^w@q;&dV4gaXTln433|i)Zuv zfN&O|Z^*UizUAJ0TUdls(!+;OwzA`F>g%FJBlzy%m8VUXx6@d{Dr``5_*35^au-lV-aCx~uJ#MLi)#1f@5U)PbhGSZgmbmRGpQ6RZl@7N6Hv&9lk6ng46x>Yk#J- zQFy&Ykp@K=U*!x9xqbVo$#T5hG0$IXER=C4161#5}FH)V6SMzULH%ScU@6LesxO4Sdrxt`sKbj`QxjI&kV!iN>3og(Gc{^wYaUafG(0b~oD+_f4YTPZs;e?v7?Aihi%Z z{t?CepN$;+Uy9y6F1CnvAJ59R7X3 z#o^5L3tX>acUCWyQyg~vHJI4|NlHktJ&14oYdQXjPK8F!)t46_!hIx=+F9>hH|f}O z85zndB05H928X12l{h}e!eO;*pzz4FqW0~E($ru$cTo~+U^&knKLZ!l+B7~ptCWyu z4xH}0xYmpp{HmH`g}vJ^SFo=3A2qUvJpJy}8mi=2VTVQabvV2e_1(16+NDE5Zti=( zZ&2%>|;*pT}!aq z@DS=X|7V3?p&rXjm~sEsx4#@IAuHo*+%aS7l;BR}*ur}k769hmeP#pE^y4oeF`DVp zw_k}<$ew(+Nt`}*>cBUi>-32{^KFBeQg^@GNEzF9U>$$;rNA#V1m@*m#73kcUo>Ui zPW;Oo2MJi-00^`*N`wViC&_=mv3>p4Oy|>5$=&Pw@RXKZcjswo&0bOhICcRNl6Oxl zv?g&fKhUaE(q;2{10r>b$G^HQ7`Gh=BwyF$`eCxEd)xe}7PuUc!H{{kQR2%9{eeIf z2D>A8n))c+psLen{YFsXtJEzc){KWWwI~(F=!3DON7l;l@nj@ooXYGfl_P`A5pO9v 
zL^A(2%nXA@>#g``9#4Frz{Il9mrsk!u=lZKer3?-H`W!(Wews`(o-MWGD{4K`N&|; zb{5jMI*tGGN@{+$MIvzY^9fpLNTRUrkn!z_CQzrR3BEyn!HXFJS>KAnZR;gAY%4Se zt#V<;33O7;!8pPN%;NNE>E*m`HpRPqE+;wLL0v6e2}=x}_&hfyRHAl)_Y&t!ok0i^jc8;_0@+=`CHuW> zq2W^@2gn3$)rCRG{L-tEFaa^*LZT^&ej+(a9aXrVX$NXJ=Ll?5$I!(0aiPHhBs_T1 zt`Tkrnd;5A$stKhmX~MKYc%td5d_Sl%(G)D)({=rJgY2%rvAhj3z=Y!>En8vN+CL9 z4&6`{r(yfwvELI}F%xpa1ftbQ4ejZT8vC542cfojvCqd#S!I-c{rR`iIQI58D7iRO zQ$o&&OI~M^8yk28=K4$K@*9Bjj_S!boX{1<^mMq>y0;FB-2T>aGA`?V?Si{; z?ehvJoo)hVlz;`^_5kfnr$XR7wok?y*c(83huZIpq#>qqNYhfa=%vBRrgZk;a&zG@ zcw+|P`gD)tSB(8m=OGZ;UZ7Q8e9**^KGYV@4{-#n9a)>8!e-Z?aKI5lwMg^^&c~u~ zZ$gU0zPW`nl>=xycxb!5IFEZ|6Y(TSs%)wkg|nwxX%0Ci*Xyrlh4Eg;%HAj?t^?6Q4V#?LB#jk8~!We7WHXaUb@c&~dEqWGb8VYSNe;OgN?sgTMl zRPYk#iX7xnYJ4fnbaWz!P2#RB){CQYk9U(Hn4|i!;gxBOcO#OWXcHfBFJ0YdEprF1T{X!{ur3;2y`#_| zFE?9rYd&#NqSm!%5c9|sc!)GIm>8tpNDjeK{OHeZfEeZ#{D zNudodu@-a9=9lgqRb4$IfBgP<|D&H&#jB96H|2aaT0c@TxDj(__P_d11GM z$ob{D8SN@}qH3S~^wsNOM)JpxdJ9^vq!M#}81lIGp?BuePDRDG4mvp&Ic0d<*}csFD)z; z%Ei_bxoq@uS4g@mV-s4O|``t*I_-R;SGj{Da8#8GwdMgruIr)M&o&UiEjVH+;IeXYtLg&Mc4tPBzG>KkK8GC{|1(-KpC zDA};O$okoC6#jGTdjOcI%jyrTPSEELW5Yuc0G^Bnn^}zuo*0#&fj5a)gj|11HLokV zeCs{oa1~VOHc-K90=G&jE~7KTreZoMl!%GX=m?EnyCjewTI>R6tCXQx^2j%P=nM1h z^^R25wX#87ozU(!ao3a6o187WXXgx%GYb!ZZAH~_hu#HN>-Wsgov&u_8-}GUYcLT4 zW|lcdfq?_++EO6Cv3Qg3&2JWOR4_BVei}CtzPcmjDz7-48d~q~(UcSfvLB0TjcC9~Ksnlco9t&zJR7#T4;>WDo2y+dQ0qC|XNP z3)#nsiHX7jyo=r+d~BE>jljh;Fy=a??x*4L?U^wSN#iqf#hPN}k%3dqPT}Lr0D4h^ z1(UFhjvk5u7Az|-uRpEOz&@c7@Z&OuBtO3q zt6NeE<;4qv&=W62yz{H(^oTmo;#i~Kjf;7(b8Eu}xo!XxCGk{g*)U;kuxoi z|18T_F4CG_!MisE7=cncZXsSIU<>L7X{-$6VsEx!k9blMd68=NePluoqXD2-vR{x{ zQ**`Du=YOi#p-aFcdnL3z{f(|OW)HQD_&|4MJLdz@Zg`u;{Ulhe=P)^I?~=3tFW}T Ps%L)A>MZGu=g)r!DBp5+ literal 0 HcmV?d00001 From 69f0d91e17a1ef85e95075593f26fe73ee19823b Mon Sep 17 00:00:00 2001 From: allegroai Date: Mon, 21 Mar 2022 17:14:34 +0200 Subject: [PATCH 19/19] update requirements --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt 
b/requirements.txt index a37514d..caf94e5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -clearml >= 1.1.6 +clearml >= 1.3.1