From 8b77d2cd28a86fdad4c5e6b6e417c81b837e733d Mon Sep 17 00:00:00 2001
From: fawad_nizamani
Date: Mon, 13 Dec 2021 20:09:39 +0500
Subject: [PATCH] Add Python example of sending inference requests to a model
 deployed on the Triton engine

---
 ...6565642f4d4e4953545f64696769742e706e67.png |  Bin 0 -> 3225 bytes
 .../client.py                                 |   88 +
 .../http_triton.py                            | 1970 +++++++++++++++++
 .../sample_image.webp                         |  Bin 0 -> 27042 bytes
 4 files changed, 2058 insertions(+)
 create mode 100644 examples/clearml_serving_simple_http_inference_request/68747470733a2f2f646174616d61646e6573732e6769746875622e696f2f6173736574732f696d616765732f74665f66696c655f666565642f4d4e4953545f64696769742e706e67.png
 create mode 100644 examples/clearml_serving_simple_http_inference_request/client.py
 create mode 100644 examples/clearml_serving_simple_http_inference_request/http_triton.py
 create mode 100644 examples/clearml_serving_simple_http_inference_request/sample_image.webp

diff --git a/examples/clearml_serving_simple_http_inference_request/68747470733a2f2f646174616d61646e6573732e6769746875622e696f2f6173736574732f696d616765732f74665f66696c655f666565642f4d4e4953545f64696769742e706e67.png b/examples/clearml_serving_simple_http_inference_request/68747470733a2f2f646174616d61646e6573732e6769746875622e696f2f6173736574732f696d616765732f74665f66696c655f666565642f4d4e4953545f64696769742e706e67.png
new file mode 100644
index 0000000000000000000000000000000000000000..0aafd0be677d719c23726285a03eecbd1937d0bc
GIT binary patch
literal 3225
[base85-encoded binary image data omitted]

[diff for client.py (88 lines) not shown in this excerpt]

diff --git a/examples/clearml_serving_simple_http_inference_request/http_triton.py b/examples/clearml_serving_simple_http_inference_request/http_triton.py
new file mode 100644
--- /dev/null
+++ b/examples/clearml_serving_simple_http_inference_request/http_triton.py
@@ -0,0 +1,1970 @@
+import gevent
+import gevent.pool
+import gzip
+import json
+import zlib
+
+import numpy as np
+from geventhttpclient import HTTPClient
+from geventhttpclient.url import URL
+from urllib.parse import quote
+
+# NOTE: this module mirrors Triton's `tritonclient.http` client; the imports
+# above are reconstructed from it. The helper functions used below
+# (raise_error, _get_query_string, _get_inference_request, _raise_if_error,
+# np_to_triton_dtype, triton_to_np_dtype, serialize_byte_tensor,
+# deserialize_bytes_tensor, InferenceServerException) are defined elsewhere
+# in this module.
+
+
+class InferenceServerClient:
+    """An InferenceServerClient object is used to perform any kind of
+    communication with the InferenceServer using http protocol. None
+    of the methods are thread safe. The object is intended to be used
+    by a single thread and simultaneously calling different methods
+    with different threads is not supported and will cause undefined
+    behavior.
+
+    Parameters
+    ----------
+    url : str
+        The inference server name, port and optional base path
+        in the following format: host:port/<base-path>, e.g.
+        'localhost:8000'.
+
+    verbose : bool
+        If True generate verbose output. Default value is False.
+    concurrency : int
+        The number of connections to create for this client.
+        Default value is 1.
+    connection_timeout : float
+        The timeout value for the connection. Default value
+        is 60.0 sec.
+    network_timeout : float
+        The timeout value for the network.
+        Default value is 60.0 sec.
+    max_greenlets : int
+        Determines the maximum allowed number of worker greenlets
+        for handling asynchronous inference requests. Default value
+        is None, which means there will be no restriction on the
+        number of greenlets created.
+    ssl : bool
+        If True, channels the requests over an encrypted HTTPS scheme.
+        Some improper settings may cause the connection to terminate
+        prematurely with an unsuccessful handshake. See the
+        `ssl_context_factory` option for using secure default
+        settings. Default value for this option is False.
+    ssl_options : dict
+        Any options supported by `ssl.wrap_socket` specified as a
+        dictionary. The argument is ignored if 'ssl' is False.
+    ssl_context_factory : SSLContext callable
+        It must be a callable that returns an SSLContext. Set to
+        `gevent.ssl.create_default_context` to use contexts with
+        secure default settings. This should most likely resolve
+        connection issues in a secure way. The default value for
+        this option is None, which directly wraps the socket with
+        the options provided via `ssl_options`. The argument is
+        ignored if 'ssl' is False.
+    insecure : bool
+        If True, the host name is not matched against the certificate.
+        Default value is False. The argument is ignored if 'ssl' is
+        False.
+
+    Raises
+    ------
+    Exception
+        If unable to create a client.
+
+    """
+
+    def __init__(self,
+                 url,
+                 verbose=False,
+                 concurrency=1,
+                 connection_timeout=60.0,
+                 network_timeout=60.0,
+                 max_greenlets=None,
+                 ssl=False,
+                 ssl_options=None,
+                 ssl_context_factory=None,
+                 insecure=False):
+        if url.startswith("http://") or url.startswith("https://"):
+            raise_error("url should not include the scheme")
+        scheme = "https://" if ssl else "http://"
+        self._parsed_url = URL(scheme + url)
+        self._base_uri = self._parsed_url.request_uri.rstrip('/')
+        self._client_stub = HTTPClient.from_url(
+            self._parsed_url,
+            concurrency=concurrency,
+            connection_timeout=connection_timeout,
+            network_timeout=network_timeout,
+            ssl_options=ssl_options,
+            ssl_context_factory=ssl_context_factory,
+            insecure=insecure)
+        self._pool = gevent.pool.Pool(max_greenlets)
+        self._verbose = verbose
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        """Close the client. Any future calls to the server
+        will result in an error.
+
+        """
+        self._pool.join()
+        self._client_stub.close()
+
+    def _get(self, request_uri, headers, query_params):
+        """Issues the GET request to the server.
+
+        Parameters
+        ----------
+        request_uri: str
+            The request URI to be used in the GET request.
+        headers: dict
+            Additional HTTP headers to include in the request.
+        query_params: dict
+            Optional url query parameters to use in network
+            transaction.
+
+        Returns
+        -------
+        geventhttpclient.response.HTTPSocketPoolResponse
+            The response from the server.
+        """
+        if self._base_uri is not None:
+            request_uri = self._base_uri + "/" + request_uri
+
+        if query_params is not None:
+            request_uri = request_uri + "?"
+ _get_query_string(query_params) + + if self._verbose: + print("GET {}, headers {}".format(request_uri, headers)) + + if headers is not None: + response = self._client_stub.get(request_uri, headers=headers) + else: + response = self._client_stub.get(request_uri) + + if self._verbose: + print(response) + + return response + + def _post(self, request_uri, request_body, headers, query_params): + """Issues the POST request to the server + + Parameters + ---------- + request_uri: str + The request URI to be used in POST request. + request_body: str + The body of the request + headers: dict + Additional HTTP headers to include in the request. + query_params: dict + Optional url query parameters to use in network + transaction. + + Returns + ------- + geventhttpclient.response.HTTPSocketPoolResponse + The response from server. + """ + if self._base_uri is not None: + request_uri = self._base_uri + "/" + request_uri + + if query_params is not None: + request_uri = request_uri + "?" + _get_query_string(query_params) + + if self._verbose: + print("POST {}, headers {}\n{}".format(request_uri, headers, + request_body)) + + if headers is not None: + response = self._client_stub.post(request_uri=request_uri, + body=request_body, + headers=headers) + else: + response = self._client_stub.post(request_uri=request_uri, + body=request_body) + + if self._verbose: + print(response) + + return response + + def is_server_live(self, headers=None, query_params=None): + """Contact the inference server and get liveness. + + Parameters + ---------- + headers: dict + Optional dictionary specifying additional HTTP + headers to include in the request. + query_params: dict + Optional url query parameters to use in network + transaction. + + Returns + ------- + bool + True if server is live, False if server is not live. + + Raises + ------ + Exception + If unable to get liveness. + + """ + + request_uri = "v2/health/live" + response = self._get(request_uri=request_uri, + headers=headers, + query_params=query_params) + + return response.status_code == 200 + + def is_server_ready(self, headers=None, query_params=None): + """Contact the inference server and get readiness. + + Parameters + ---------- + headers: dict + Optional dictionary specifying additional HTTP + headers to include in the request. + query_params: dict + Optional url query parameters to use in network + transaction. + + Returns + ------- + bool + True if server is ready, False if server is not ready. + + Raises + ------ + Exception + If unable to get readiness. + + """ + request_uri = "v2/health/ready" + response = self._get(request_uri=request_uri, + headers=headers, + query_params=query_params) + + return response.status_code == 200 + + def is_model_ready(self, + model_name, + model_version="", + headers=None, + query_params=None): + """Contact the inference server and get the readiness of specified model. + + Parameters + ---------- + model_name: str + The name of the model to check for readiness. + model_version: str + The version of the model to check for readiness. The default value + is an empty string which means then the server will choose a version + based on the model and internal policy. + headers: dict + Optional dictionary specifying additional HTTP + headers to include in the request. + query_params: dict + Optional url query parameters to use in network + transaction. + + Returns + ------- + bool + True if the model is ready, False if not ready. + + Raises + ------ + Exception + If unable to get model readiness. 
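+
+        Examples
+        --------
+        A minimal sketch; assumes a Triton server on localhost:8000
+        serving a model named "mymodel"::
+
+            client = InferenceServerClient(url="localhost:8000")
+            if client.is_model_ready("mymodel"):
+                print("mymodel is ready for inference")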
+ + """ + if type(model_version) != str: + raise_error("model version must be a string") + if model_version != "": + request_uri = "v2/models/{}/versions/{}/ready".format( + quote(model_name), model_version) + else: + request_uri = "v2/models/{}/ready".format(quote(model_name)) + + response = self._get(request_uri=request_uri, + headers=headers, + query_params=query_params) + + return response.status_code == 200 + + def get_server_metadata(self, headers=None, query_params=None): + """Contact the inference server and get its metadata. + + Parameters + ---------- + headers: dict + Optional dictionary specifying additional HTTP + headers to include in the request. + query_params: dict + Optional url query parameters to use in network + transaction. + + Returns + ------- + dict + The JSON dict holding the metadata. + + Raises + ------ + InferenceServerException + If unable to get server metadata. + + """ + request_uri = "v2" + response = self._get(request_uri=request_uri, + headers=headers, + query_params=query_params) + _raise_if_error(response) + + content = response.read() + if self._verbose: + print(content) + + return json.loads(content) + + def get_model_metadata(self, + model_name, + model_version="", + headers=None, + query_params=None): + """Contact the inference server and get the metadata for specified model. + + Parameters + ---------- + model_name: str + The name of the model + model_version: str + The version of the model to get metadata. The default value + is an empty string which means then the server will choose + a version based on the model and internal policy. + headers: dict + Optional dictionary specifying additional + HTTP headers to include in the request + query_params: dict + Optional url query parameters to use in network + transaction + + Returns + ------- + dict + The JSON dict holding the metadata. + + Raises + ------ + InferenceServerException + If unable to get model metadata. + + """ + if type(model_version) != str: + raise_error("model version must be a string") + if model_version != "": + request_uri = "v2/models/{}/versions/{}".format( + quote(model_name), model_version) + else: + request_uri = "v2/models/{}".format(quote(model_name)) + + response = self._get(request_uri=request_uri, + headers=headers, + query_params=query_params) + _raise_if_error(response) + + content = response.read() + if self._verbose: + print(content) + + return json.loads(content) + + def get_model_config(self, + model_name, + model_version="", + headers=None, + query_params=None): + """Contact the inference server and get the configuration for specified model. + + Parameters + ---------- + model_name: str + The name of the model + model_version: str + The version of the model to get configuration. The default value + is an empty string which means then the server will choose + a version based on the model and internal policy. + headers: dict + Optional dictionary specifying additional + HTTP headers to include in the request + query_params: dict + Optional url query parameters to use in network + transaction + + Returns + ------- + dict + The JSON dict holding the model config. + + Raises + ------ + InferenceServerException + If unable to get model configuration. 
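+
+        Examples
+        --------
+        A minimal sketch; assumes a Triton server on localhost:8000
+        serving a model named "mymodel"::
+
+            client = InferenceServerClient(url="localhost:8000")
+            config = client.get_model_config("mymodel")
+            print(config)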
+
+        """
+        if model_version != "":
+            request_uri = "v2/models/{}/versions/{}/config".format(
+                quote(model_name), model_version)
+        else:
+            request_uri = "v2/models/{}/config".format(quote(model_name))
+
+        response = self._get(request_uri=request_uri,
+                             headers=headers,
+                             query_params=query_params)
+        _raise_if_error(response)
+
+        content = response.read()
+        if self._verbose:
+            print(content)
+
+        return json.loads(content)
+
+    def get_model_repository_index(self, headers=None, query_params=None):
+        """Get the index of model repository contents.
+
+        Parameters
+        ----------
+        headers: dict
+            Optional dictionary specifying additional
+            HTTP headers to include in the request
+        query_params: dict
+            Optional url query parameters to use in network
+            transaction
+
+        Returns
+        -------
+        dict
+            The JSON dict holding the model repository index.
+
+        Raises
+        ------
+        InferenceServerException
+            If unable to get the repository index.
+
+        """
+        request_uri = "v2/repository/index"
+        response = self._post(request_uri=request_uri,
+                              request_body="",
+                              headers=headers,
+                              query_params=query_params)
+        _raise_if_error(response)
+
+        content = response.read()
+        if self._verbose:
+            print(content)
+
+        return json.loads(content)
+
+    def load_model(self, model_name, headers=None, query_params=None):
+        """Request the inference server to load or reload the specified model.
+
+        Parameters
+        ----------
+        model_name : str
+            The name of the model to be loaded.
+        headers: dict
+            Optional dictionary specifying additional
+            HTTP headers to include in the request
+        query_params: dict
+            Optional url query parameters to use in network
+            transaction
+
+        Raises
+        ------
+        InferenceServerException
+            If unable to load the model.
+
+        """
+        request_uri = "v2/repository/models/{}/load".format(quote(model_name))
+        response = self._post(request_uri=request_uri,
+                              request_body="",
+                              headers=headers,
+                              query_params=query_params)
+        _raise_if_error(response)
+        if self._verbose:
+            print("Loaded model '{}'".format(model_name))
+
+    def unload_model(self,
+                     model_name,
+                     headers=None,
+                     query_params=None,
+                     unload_dependents=False):
+        """Request the inference server to unload the specified model.
+
+        Parameters
+        ----------
+        model_name : str
+            The name of the model to be unloaded.
+        headers: dict
+            Optional dictionary specifying additional
+            HTTP headers to include in the request
+        query_params: dict
+            Optional url query parameters to use in network
+            transaction
+        unload_dependents : bool
+            Whether the dependents of the model should also be unloaded.
+
+        Raises
+        ------
+        InferenceServerException
+            If unable to unload the model.
+
+        """
+        request_uri = "v2/repository/models/{}/unload".format(quote(model_name))
+        unload_request = {
+            "parameters": {
+                "unload_dependents": unload_dependents
+            }
+        }
+        response = self._post(request_uri=request_uri,
+                              request_body=json.dumps(unload_request),
+                              headers=headers,
+                              query_params=query_params)
+        _raise_if_error(response)
+        if self._verbose:
+            print("Unloaded model '{}'".format(model_name))
+
+    def get_inference_statistics(self,
+                                 model_name="",
+                                 model_version="",
+                                 headers=None,
+                                 query_params=None):
+        """Get the inference statistics for the specified model name and
+        version.
+
+        Parameters
+        ----------
+        model_name : str
+            The name of the model to get statistics. The default value is
+            an empty string, which means statistics of all models will
+            be returned.
+        model_version: str
+            The version of the model to get inference statistics.
The + default value is an empty string which means then the server + will return the statistics of all available model versions. + headers: dict + Optional dictionary specifying additional HTTP + headers to include in the request. + query_params: dict + Optional url query parameters to use in network + transaction + + Returns + ------- + dict + The JSON dict holding the model inference statistics. + + Raises + ------ + InferenceServerException + If unable to get the model inference statistics. + + """ + + if model_name != "": + if type(model_version) != str: + raise_error("model version must be a string") + if model_version != "": + request_uri = "v2/models/{}/versions/{}/stats".format( + quote(model_name), model_version) + else: + request_uri = "v2/models/{}/stats".format(quote(model_name)) + else: + request_uri = "v2/models/stats" + + response = self._get(request_uri=request_uri, + headers=headers, + query_params=query_params) + _raise_if_error(response) + + content = response.read() + if self._verbose: + print(content) + + return json.loads(content) + + def get_system_shared_memory_status(self, + region_name="", + headers=None, + query_params=None): + """Request system shared memory status from the server. + + Parameters + ---------- + region_name : str + The name of the region to query status. The default + value is an empty string, which means that the status + of all active system shared memory will be returned. + headers: dict + Optional dictionary specifying additional HTTP + headers to include in the request + query_params: dict + Optional url query parameters to use in network + transaction + + Returns + ------- + dict + The JSON dict holding system shared memory status. + + Raises + ------ + InferenceServerException + If unable to get the status of specified shared memory. + + """ + if region_name != "": + request_uri = "v2/systemsharedmemory/region/{}/status".format( + quote(region_name)) + else: + request_uri = "v2/systemsharedmemory/status" + + response = self._get(request_uri=request_uri, + headers=headers, + query_params=query_params) + _raise_if_error(response) + + content = response.read() + if self._verbose: + print(content) + + return json.loads(content) + + def register_system_shared_memory(self, + name, + key, + byte_size, + offset=0, + headers=None, + query_params=None): + """Request the server to register a system shared memory with the + following specification. + + Parameters + ---------- + name : str + The name of the region to register. + key : str + The key of the underlying memory object that contains the + system shared memory region. + byte_size : int + The size of the system shared memory region, in bytes. + offset : int + Offset, in bytes, within the underlying memory object to + the start of the system shared memory region. The default + value is zero. + headers: dict + Optional dictionary specifying additional + HTTP headers to include in the request + query_params: dict + Optional url query parameters to use in network + transaction + + Raises + ------ + InferenceServerException + If unable to register the specified system shared memory. 
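+
+        Examples
+        --------
+        A minimal sketch; assumes a 64-byte system shared memory object
+        was already created under the key "/my_shm" (for example with
+        the tritonclient shared-memory utilities)::
+
+            client = InferenceServerClient(url="localhost:8000")
+            client.register_system_shared_memory(
+                name="my_region", key="/my_shm", byte_size=64)
+            print(client.get_system_shared_memory_status("my_region"))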
+ + """ + request_uri = "v2/systemsharedmemory/region/{}/register".format( + quote(name)) + + register_request = { + 'key': key, + 'offset': offset, + 'byte_size': byte_size + } + request_body = json.dumps(register_request) + + response = self._post(request_uri=request_uri, + request_body=request_body, + headers=headers, + query_params=query_params) + _raise_if_error(response) + if self._verbose: + print("Registered system shared memory with name '{}'".format(name)) + + def unregister_system_shared_memory(self, + name="", + headers=None, + query_params=None): + """Request the server to unregister a system shared memory with the + specified name. + + Parameters + ---------- + name : str + The name of the region to unregister. The default value is empty + string which means all the system shared memory regions will be + unregistered. + headers: dict + Optional dictionary specifying additional + HTTP headers to include in the request + query_params: dict + Optional url query parameters to use in network + transaction + + Raises + ------ + InferenceServerException + If unable to unregister the specified system shared memory region. + + """ + if name != "": + request_uri = "v2/systemsharedmemory/region/{}/unregister".format( + quote(name)) + else: + request_uri = "v2/systemsharedmemory/unregister" + + response = self._post(request_uri=request_uri, + request_body="", + headers=headers, + query_params=query_params) + _raise_if_error(response) + if self._verbose: + if name != "": + print("Unregistered system shared memory with name '{}'".format( + name)) + else: + print("Unregistered all system shared memory regions") + + def get_cuda_shared_memory_status(self, + region_name="", + headers=None, + query_params=None): + """Request cuda shared memory status from the server. + + Parameters + ---------- + region_name : str + The name of the region to query status. The default + value is an empty string, which means that the status + of all active cuda shared memory will be returned. + headers: dict + Optional dictionary specifying additional + HTTP headers to include in the request + query_params: dict + Optional url query parameters to use in network + transaction + + Returns + ------- + dict + The JSON dict holding cuda shared memory status. + + Raises + ------ + InferenceServerException + If unable to get the status of specified shared memory. + + """ + if region_name != "": + request_uri = "v2/cudasharedmemory/region/{}/status".format( + quote(region_name)) + else: + request_uri = "v2/cudasharedmemory/status" + + response = self._get(request_uri=request_uri, + headers=headers, + query_params=query_params) + _raise_if_error(response) + + content = response.read() + if self._verbose: + print(content) + + return json.loads(content) + + def register_cuda_shared_memory(self, + name, + raw_handle, + device_id, + byte_size, + headers=None, + query_params=None): + """Request the server to register a system shared memory with the + following specification. + + Parameters + ---------- + name : str + The name of the region to register. + raw_handle : bytes + The raw serialized cudaIPC handle in base64 encoding. + device_id : int + The GPU device ID on which the cudaIPC handle was created. + byte_size : int + The size of the cuda shared memory region, in bytes. 
+ headers: dict + Optional dictionary specifying additional + HTTP headers to include in the request + query_params: dict + Optional url query parameters to use in network + transaction + + Raises + ------ + InferenceServerException + If unable to register the specified cuda shared memory. + + """ + request_uri = "v2/cudasharedmemory/region/{}/register".format( + quote(name)) + + register_request = { + 'raw_handle': { + 'b64': raw_handle + }, + 'device_id': device_id, + 'byte_size': byte_size + } + request_body = json.dumps(register_request) + + response = self._post(request_uri=request_uri, + request_body=request_body, + headers=headers, + query_params=query_params) + _raise_if_error(response) + if self._verbose: + print("Registered cuda shared memory with name '{}'".format(name)) + + def unregister_cuda_shared_memory(self, + name="", + headers=None, + query_params=None): + """Request the server to unregister a cuda shared memory with the + specified name. + + Parameters + ---------- + name : str + The name of the region to unregister. The default value is empty + string which means all the cuda shared memory regions will be + unregistered. + headers: dict + Optional dictionary specifying additional + HTTP headers to include in the request + query_params: dict + Optional url query parameters to use in network + transaction + + Raises + ------ + InferenceServerException + If unable to unregister the specified cuda shared memory region. + + """ + if name != "": + request_uri = "v2/cudasharedmemory/region/{}/unregister".format( + quote(name)) + else: + request_uri = "v2/cudasharedmemory/unregister" + + response = self._post(request_uri=request_uri, + request_body="", + headers=headers, + query_params=query_params) + _raise_if_error(response) + if self._verbose: + if name != "": + print("Unregistered cuda shared memory with name '{}'".format( + name)) + else: + print("Unregistered all cuda shared memory regions") + + @staticmethod + def generate_request_body(inputs, + outputs=None, + request_id="", + sequence_id=0, + sequence_start=False, + sequence_end=False, + priority=0, + timeout=None): + """Generate a request body for inference using the supplied 'inputs' + requesting the outputs specified by 'outputs'. + + Parameters + ---------- + inputs : list + A list of InferInput objects, each describing data for a input + tensor required by the model. + outputs : list + A list of InferRequestedOutput objects, each describing how the output + data must be returned. If not specified all outputs produced + by the model will be returned using default settings. + request_id: str + Optional identifier for the request. If specified will be returned + in the response. Default value is an empty string which means no + request_id will be used. + sequence_id : int or str + The unique identifier for the sequence being represented by the + object. A value of 0 or "" means that the request does not + belong to a sequence. Default is 0. + sequence_start: bool + Indicates whether the request being added marks the start of the + sequence. Default value is False. This argument is ignored if + 'sequence_id' is 0. + sequence_end: bool + Indicates whether the request being added marks the end of the + sequence. Default value is False. This argument is ignored if + 'sequence_id' is 0. + priority : int + Indicates the priority of the request. Priority value zero + indicates that the default priority level should be used + (i.e. same behavior as not specifying the priority parameter). 
+ Lower value priorities indicate higher priority levels. Thus + the highest priority level is indicated by setting the parameter + to 1, the next highest is 2, etc. If not provided, the server + will handle the request using default setting for the model. + timeout : int + The timeout value for the request, in microseconds. If the request + cannot be completed within the time the server can take a + model-specific action such as terminating the request. If not + provided, the server will handle the request using default setting + for the model. + + Returns + ------- + Bytes + The request body of the inference. + Int + The byte size of the inference request header in the request body. + Returns None if the whole request body constitutes the request header. + + + Raises + ------ + InferenceServerException + If server fails to perform inference. + """ + return _get_inference_request(inputs=inputs, + request_id=request_id, + outputs=outputs, + sequence_id=sequence_id, + sequence_start=sequence_start, + sequence_end=sequence_end, + priority=priority, + timeout=timeout) + + @staticmethod + def parse_response_body(response_body, + verbose=False, + header_length=None, + content_encoding=None): + """Generate a InferResult object from the given 'response_body' + + Parameters + ---------- + response_body : bytes + The inference response from the server + verbose : bool + If True generate verbose output. Default value is False. + header_length : int + The length of the inference header if the header does not occupy + the whole response body. Default value is None. + content_encoding : string + The encoding of the response body if it is compressed. + Default value is None. + + Returns + ------- + InferResult + The InferResult object generated from the response body + """ + return InferResult.from_response_body(response_body, verbose, + header_length, content_encoding) + + def infer(self, + model_name, + inputs, + model_version="", + outputs=None, + request_id="", + sequence_id=0, + sequence_start=False, + sequence_end=False, + priority=0, + timeout=None, + headers=None, + query_params=None, + request_compression_algorithm=None, + response_compression_algorithm=None): + """Run synchronous inference using the supplied 'inputs' requesting + the outputs specified by 'outputs'. + + Parameters + ---------- + model_name: str + The name of the model to run inference. + inputs : list + A list of InferInput objects, each describing data for a input + tensor required by the model. + model_version: str + The version of the model to run inference. The default value + is an empty string which means then the server will choose + a version based on the model and internal policy. + outputs : list + A list of InferRequestedOutput objects, each describing how the output + data must be returned. If not specified all outputs produced + by the model will be returned using default settings. + request_id: str + Optional identifier for the request. If specified will be returned + in the response. Default value is an empty string which means no + request_id will be used. + sequence_id : int + The unique identifier for the sequence being represented by the + object. Default value is 0 which means that the request does not + belong to a sequence. + sequence_start: bool + Indicates whether the request being added marks the start of the + sequence. Default value is False. This argument is ignored if + 'sequence_id' is 0. + sequence_end: bool + Indicates whether the request being added marks the end of the + sequence. 
Default value is False. This argument is ignored if + 'sequence_id' is 0. + priority : int + Indicates the priority of the request. Priority value zero + indicates that the default priority level should be used + (i.e. same behavior as not specifying the priority parameter). + Lower value priorities indicate higher priority levels. Thus + the highest priority level is indicated by setting the parameter + to 1, the next highest is 2, etc. If not provided, the server + will handle the request using default setting for the model. + timeout : int + The timeout value for the request, in microseconds. If the request + cannot be completed within the time the server can take a + model-specific action such as terminating the request. If not + provided, the server will handle the request using default setting + for the model. + headers: dict + Optional dictionary specifying additional HTTP + headers to include in the request. + query_params: dict + Optional url query parameters to use in network + transaction. + request_compression_algorithm : str + Optional HTTP compression algorithm to use for the request body on client side. + Currently supports "deflate", "gzip" and None. By default, no + compression is used. + response_compression_algorithm : str + Optional HTTP compression algorithm to request for the response body. + Note that the response may not be compressed if the server does not + support the specified algorithm. Currently supports "deflate", + "gzip" and None. By default, no compression is requested. + + Returns + ------- + InferResult + The object holding the result of the inference. + + Raises + ------ + InferenceServerException + If server fails to perform inference. + """ + + request_body, json_size = _get_inference_request( + inputs=inputs, + request_id=request_id, + outputs=outputs, + sequence_id=sequence_id, + sequence_start=sequence_start, + sequence_end=sequence_end, + priority=priority, + timeout=timeout) + + if request_compression_algorithm == "gzip": + if headers is None: + headers = {} + headers["Content-Encoding"] = "gzip" + request_body = gzip.compress(request_body) + elif request_compression_algorithm == 'deflate': + if headers is None: + headers = {} + headers["Content-Encoding"] = "deflate" + # "Content-Encoding: deflate" actually means compressing in zlib structure + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding + request_body = zlib.compress(request_body) + + if response_compression_algorithm == "gzip": + if headers is None: + headers = {} + headers["Accept-Encoding"] = "gzip" + elif response_compression_algorithm == 'deflate': + if headers is None: + headers = {} + headers["Accept-Encoding"] = "deflate" + + if json_size is not None: + if headers is None: + headers = {} + headers["Inference-Header-Content-Length"] = json_size + + if type(model_version) != str: + raise_error("model version must be a string") + if model_version != "": + request_uri = "v2/models/{}/versions/{}/infer".format( + quote(model_name), model_version) + else: + request_uri = "v2/models/{}/infer".format(quote(model_name)) + + response = self._post(request_uri=request_uri, + request_body=request_body, + headers=headers, + query_params=query_params) + _raise_if_error(response) + + return InferResult(response, self._verbose) + + def async_infer(self, + model_name, + inputs, + model_version="", + outputs=None, + request_id="", + sequence_id=0, + sequence_start=False, + sequence_end=False, + priority=0, + timeout=None, + headers=None, + query_params=None, + 
request_compression_algorithm=None, + response_compression_algorithm=None): + """Run asynchronous inference using the supplied 'inputs' requesting + the outputs specified by 'outputs'. Even though this call is + non-blocking, however, the actual number of concurrent requests to + the server will be limited by the 'concurrency' parameter specified + while creating this client. In other words, if the inflight + async_infer exceeds the specified 'concurrency', the delivery of + the exceeding request(s) to server will be blocked till the slot is + made available by retrieving the results of previously issued requests. + + Parameters + ---------- + model_name: str + The name of the model to run inference. + inputs : list + A list of InferInput objects, each describing data for a input + tensor required by the model. + model_version: str + The version of the model to run inference. The default value + is an empty string which means then the server will choose + a version based on the model and internal policy. + outputs : list + A list of InferRequestedOutput objects, each describing how the output + data must be returned. If not specified all outputs produced + by the model will be returned using default settings. + request_id: str + Optional identifier for the request. If specified will be returned + in the response. Default value is 'None' which means no request_id + will be used. + sequence_id : int + The unique identifier for the sequence being represented by the + object. Default value is 0 which means that the request does not + belong to a sequence. + sequence_start: bool + Indicates whether the request being added marks the start of the + sequence. Default value is False. This argument is ignored if + 'sequence_id' is 0. + sequence_end: bool + Indicates whether the request being added marks the end of the + sequence. Default value is False. This argument is ignored if + 'sequence_id' is 0. + priority : int + Indicates the priority of the request. Priority value zero + indicates that the default priority level should be used + (i.e. same behavior as not specifying the priority parameter). + Lower value priorities indicate higher priority levels. Thus + the highest priority level is indicated by setting the parameter + to 1, the next highest is 2, etc. If not provided, the server + will handle the request using default setting for the model. + timeout : int + The timeout value for the request, in microseconds. If the request + cannot be completed within the time the server can take a + model-specific action such as terminating the request. If not + provided, the server will handle the request using default setting + for the model. + headers: dict + Optional dictionary specifying additional HTTP + headers to include in the request + query_params: dict + Optional url query parameters to use in network + transaction. + request_compression_algorithm : str + Optional HTTP compression algorithm to use for the request body on client side. + Currently supports "deflate", "gzip" and None. By default, no + compression is used. + response_compression_algorithm : str + Optional HTTP compression algorithm to request for the response body. + Note that the response may not be compressed if the server does not + support the specified algorithm. Currently supports "deflate", + "gzip" and None. By default, no compression is requested. + + Returns + ------- + InferAsyncRequest object + The handle to the asynchronous inference request. 
+ + Raises + ------ + InferenceServerException + If server fails to issue inference. + """ + + def wrapped_post(request_uri, request_body, headers, query_params): + return self._post(request_uri, request_body, headers, query_params) + + request_body, json_size = _get_inference_request( + inputs=inputs, + request_id=request_id, + outputs=outputs, + sequence_id=sequence_id, + sequence_start=sequence_start, + sequence_end=sequence_end, + priority=priority, + timeout=timeout) + + if request_compression_algorithm == "gzip": + if headers is None: + headers = {} + headers["Content-Encoding"] = "gzip" + request_body = gzip.compress(request_body) + elif request_compression_algorithm == 'deflate': + if headers is None: + headers = {} + headers["Content-Encoding"] = "deflate" + # "Content-Encoding: deflate" actually means compressing in zlib structure + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding + request_body = zlib.compress(request_body) + + if response_compression_algorithm == "gzip": + if headers is None: + headers = {} + headers["Accept-Encoding"] = "gzip" + elif response_compression_algorithm == 'deflate': + if headers is None: + headers = {} + headers["Accept-Encoding"] = "deflate" + + if json_size is not None: + if headers is None: + headers = {} + headers["Inference-Header-Content-Length"] = json_size + + if type(model_version) != str: + raise_error("model version must be a string") + if model_version != "": + request_uri = "v2/models/{}/versions/{}/infer".format( + quote(model_name), model_version) + else: + request_uri = "v2/models/{}/infer".format(quote(model_name)) + + g = self._pool.apply_async( + wrapped_post, (request_uri, request_body, headers, query_params)) + + # Schedule the greenlet to run in this loop iteration + g.start() + + # Relinquish control to greenlet loop. Using non-zero + # value to ensure the control is transferred to the + # event loop. + gevent.sleep(0.01) + + if self._verbose: + verbose_message = "Sent request" + if request_id != "": + verbose_message = verbose_message + " '{}'".format(request_id) + print(verbose_message) + + return InferAsyncRequest(g, self._verbose) + + +class InferAsyncRequest: + """An object of InferAsyncRequest class is used to describe + a handle to an ongoing asynchronous inference request. + + Parameters + ---------- + greenlet : gevent.Greenlet + The greenlet object which will provide the results. + For further details about greenlets refer + http://www.gevent.org/api/gevent.greenlet.html. + + verbose : bool + If True generate verbose output. Default value is False. + """ + + def __init__(self, greenlet, verbose=False): + self._greenlet = greenlet + self._verbose = verbose + + def get_result(self, block=True, timeout=None): + """Get the results of the associated asynchronous inference. + Parameters + ---------- + block : bool + If block is True, the function will wait till the + corresponding response is received from the server. + Default value is True. + timeout : int + The maximum wait time for the function. This setting is + ignored if the block is set False. Default is None, + which means the function will block indefinitely till + the corresponding response is received. + + Returns + ------- + InferResult + The object holding the result of the async inference. + + Raises + ------ + InferenceServerException + If server fails to perform inference or failed to respond + within specified timeout. 
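+
+        Examples
+        --------
+        A minimal sketch; assumes `client` is an InferenceServerClient
+        and `inputs` is a list of InferInput objects prepared for a
+        model named "mymodel"::
+
+            async_request = client.async_infer("mymodel", inputs)
+            # ... do other work while the request is in flight ...
+            result = async_request.get_result()
+            print(result.get_response())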
+ """ + + try: + response = self._greenlet.get(block=block, timeout=timeout) + except gevent.Timeout as e: + raise_error("failed to obtain inference response") + + _raise_if_error(response) + return InferResult(response, self._verbose) + + +class InferInput: + """An object of InferInput class is used to describe + input tensor for an inference request. + + Parameters + ---------- + name : str + The name of input whose data will be described by this object + shape : list + The shape of the associated input. + datatype : str + The datatype of the associated input. + """ + + def __init__(self, name, shape, datatype): + self._name = name + self._shape = shape + self._datatype = datatype + self._parameters = {} + self._data = None + self._raw_data = None + + def name(self): + """Get the name of input associated with this object. + + Returns + ------- + str + The name of input + """ + return self._name + + def datatype(self): + """Get the datatype of input associated with this object. + + Returns + ------- + str + The datatype of input + """ + return self._datatype + + def shape(self): + """Get the shape of input associated with this object. + + Returns + ------- + list + The shape of input + """ + return self._shape + + def set_shape(self, shape): + """Set the shape of input. + + Parameters + ---------- + shape : list + The shape of the associated input. + """ + self._shape = shape + + def set_data_from_numpy(self, input_tensor, binary_data=True): + """Set the tensor data from the specified numpy array for + input associated with this object. + + Parameters + ---------- + input_tensor : numpy array + The tensor data in numpy array format + binary_data : bool + Indicates whether to set data for the input in binary format + or explicit tensor within JSON. The default value is True, + which means the data will be delivered as binary data in the + HTTP body after the JSON object. + + Raises + ------ + InferenceServerException + If failed to set data for the tensor. + """ + if not isinstance(input_tensor, (np.ndarray,)): + raise_error("input_tensor must be a numpy array") + dtype = np_to_triton_dtype(input_tensor.dtype) + if self._datatype != dtype: + raise_error( + "got unexpected datatype {} from numpy array, expected {}". + format(dtype, self._datatype)) + valid_shape = True + if len(self._shape) != len(input_tensor.shape): + valid_shape = False + else: + for i in range(len(self._shape)): + if self._shape[i] != input_tensor.shape[i]: + valid_shape = False + if not valid_shape: + raise_error( + "got unexpected numpy array shape [{}], expected [{}]".format( + str(input_tensor.shape)[1:-1], + str(self._shape)[1:-1])) + + self._parameters.pop('shared_memory_region', None) + self._parameters.pop('shared_memory_byte_size', None) + self._parameters.pop('shared_memory_offset', None) + + if not binary_data: + self._parameters.pop('binary_data_size', None) + self._raw_data = None + if self._datatype == "BYTES": + self._data = [] + try: + if input_tensor.size > 0: + for obj in np.nditer(input_tensor, + flags=["refs_ok"], + order='C'): + # We need to convert the object to string using utf-8, + # if we want to use the binary_data=False. JSON requires + # the input to be a UTF-8 string. 
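+                            # Bytes objects are decoded as UTF-8 text; any
+                            # other object type is converted with str().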
+                            if input_tensor.dtype == np.object_:
+                                if type(obj.item()) == bytes:
+                                    self._data.append(
+                                        str(obj.item(), encoding='utf-8'))
+                                else:
+                                    self._data.append(str(obj.item()))
+                            else:
+                                self._data.append(
+                                    str(obj.item(), encoding='utf-8'))
+                except UnicodeDecodeError:
+                    raise_error(
+                        f'Failed to decode "{obj.item()}" using UTF-8. Please use binary_data=True if'
+                        ' you want to pass a byte array.')
+            else:
+                self._data = [val.item() for val in input_tensor.flatten()]
+        else:
+            self._data = None
+            if self._datatype == "BYTES":
+                serialized_output = serialize_byte_tensor(input_tensor)
+                if serialized_output.size > 0:
+                    self._raw_data = serialized_output.item()
+                else:
+                    self._raw_data = b''
+            else:
+                self._raw_data = input_tensor.tobytes()
+            self._parameters['binary_data_size'] = len(self._raw_data)
+
+    def set_shared_memory(self, region_name, byte_size, offset=0):
+        """Set the tensor data from the specified shared memory region.
+
+        Parameters
+        ----------
+        region_name : str
+            The name of the shared memory region holding tensor data.
+        byte_size : int
+            The size of the shared memory region holding tensor data.
+        offset : int
+            The offset, in bytes, into the region where the data for
+            the tensor starts. The default value is 0.
+
+        """
+        self._data = None
+        self._raw_data = None
+        self._parameters.pop('binary_data_size', None)
+
+        self._parameters['shared_memory_region'] = region_name
+        self._parameters['shared_memory_byte_size'] = byte_size
+        if offset != 0:
+            self._parameters['shared_memory_offset'] = offset
+
+    def _get_binary_data(self):
+        """Returns the raw binary data if available
+
+        Returns
+        -------
+        bytes
+            The raw data for the input tensor
+        """
+        return self._raw_data
+
+    def _get_tensor(self):
+        """Retrieve the underlying input as json dict.
+
+        Returns
+        -------
+        dict
+            The underlying tensor specification as dict
+        """
+        tensor = {
+            'name': self._name,
+            'shape': self._shape,
+            'datatype': self._datatype
+        }
+        if self._parameters:
+            tensor['parameters'] = self._parameters
+
+        if self._parameters.get('shared_memory_region') is None and \
+                self._raw_data is None:
+            if self._data is not None:
+                tensor['data'] = self._data
+        return tensor
+
+
+class InferRequestedOutput:
+    """An object of InferRequestedOutput class is used to describe a
+    requested output tensor for an inference request.
+
+    Parameters
+    ----------
+    name : str
+        The name of the output tensor to associate with this object.
+    binary_data : bool
+        Indicates whether to return result data for the output in
+        binary format or explicit tensor within JSON. The default
+        value is True, which means the data will be delivered as
+        binary data in the HTTP body after the JSON object. This field
+        will be unset if shared memory is set for the output.
+    class_count : int
+        The number of classifications to be requested. The default
+        value is 0 which means the classification results are not
+        requested.
+    """
+
+    def __init__(self, name, binary_data=True, class_count=0):
+        self._name = name
+        self._parameters = {}
+        if class_count != 0:
+            self._parameters['classification'] = class_count
+        self._binary = binary_data
+        self._parameters['binary_data'] = binary_data
+
+    def name(self):
+        """Get the name of output associated with this object.
+
+        Returns
+        -------
+        str
+            The name of output
+        """
+        return self._name
+
+    def set_shared_memory(self, region_name, byte_size, offset=0):
+        """Marks the output to return the inference result in
+        the specified shared memory region.
+ + Parameters + ---------- + region_name : str + The name of the shared memory region to hold tensor data. + byte_size : int + The size of the shared memory region to hold tensor data. + offset : int + The offset, in bytes, into the region where the data for + the tensor starts. The default value is 0. + + """ + if 'classification' in self._parameters: + raise_error("shared memory can't be set on classification output") + if self._binary: + self._parameters['binary_data'] = False + + self._parameters['shared_memory_region'] = region_name + self._parameters['shared_memory_byte_size'] = byte_size + if offset != 0: + self._parameters['shared_memory_offset'] = offset + + def unset_shared_memory(self): + """Clears the shared memory option set by the last call to + InferRequestedOutput.set_shared_memory(). After call to this + function requested output will no longer be returned in a + shared memory region. + """ + + self._parameters['binary_data'] = self._binary + self._parameters.pop('shared_memory_region', None) + self._parameters.pop('shared_memory_byte_size', None) + self._parameters.pop('shared_memory_offset', None) + + def _get_tensor(self): + """Retrieve the underlying input as json dict. + + Returns + ------- + dict + The underlying tensor as a dict + """ + tensor = {'name': self._name} + if self._parameters: + tensor['parameters'] = self._parameters + return tensor + + +class InferResult: + """An object of InferResult class holds the response of + an inference request and provide methods to retrieve + inference results. + + Parameters + ---------- + response : geventhttpclient.response.HTTPSocketPoolResponse + The inference response from the server + verbose : bool + If True generate verbose output. Default value is False. + """ + + def __init__(self, response, verbose): + header_length = response.get('Inference-Header-Content-Length') + + # Internal class that simulate the interface of 'response' + class DecompressedResponse: + + def __init__(self, decompressed_data): + self.decompressed_data_ = decompressed_data + self.offset_ = 0 + + def read(self, length=-1): + if length == -1: + return self.decompressed_data_[self.offset_:] + else: + prev_offset = self.offset_ + self.offset_ += length + return self.decompressed_data_[prev_offset:self.offset_] + + content_encoding = response.get('Content-Encoding') + if content_encoding is not None: + if content_encoding == "gzip": + response = DecompressedResponse(gzip.decompress( + response.read())) + elif content_encoding == 'deflate': + response = DecompressedResponse(zlib.decompress( + response.read())) + if header_length is None: + content = response.read() + if verbose: + print(content) + try: + self._result = json.loads(content) + except UnicodeDecodeError as e: + raise_error( + f'Failed to encode using UTF-8. Please use binary_data=True, if' + f' you want to pass a byte array. UnicodeError: {e}') + else: + header_length = int(header_length) + content = response.read(length=header_length) + if verbose: + print(content) + self._result = json.loads(content) + + # Maps the output name to the index in buffer for quick retrieval + self._output_name_to_buffer_map = {} + # Read the remaining data off the response body. 
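+        # Each binary output occupies a contiguous slice of this buffer, in
+        # the order the outputs appear in the JSON header; record each
+        # output's starting offset so as_numpy() can slice out its bytes.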
+        self._buffer = response.read()
+        buffer_index = 0
+        for output in self._result['outputs']:
+            parameters = output.get("parameters")
+            if parameters is not None:
+                this_data_size = parameters.get("binary_data_size")
+                if this_data_size is not None:
+                    self._output_name_to_buffer_map[
+                        output['name']] = buffer_index
+                    buffer_index = buffer_index + this_data_size
+
+    @classmethod
+    def from_response_body(cls,
+                           response_body,
+                           verbose=False,
+                           header_length=None,
+                           content_encoding=None):
+        """A class method to construct an InferResult object
+        from a given 'response_body'.
+
+        Parameters
+        ----------
+        response_body : bytes
+            The inference response from the server
+        verbose : bool
+            If True generate verbose output. Default value is False.
+        header_length : int
+            The length of the inference header if the header does not occupy
+            the whole response body. Default value is None.
+        content_encoding : string
+            The encoding of the response body if it is compressed.
+            Default value is None.
+
+        Returns
+        -------
+        InferResult
+            The InferResult object generated from the response body
+        """
+
+        # Internal class that simulates the interface of 'response'
+        class Response:
+
+            def __init__(self, response_body, header_length, content_encoding):
+                self.response_body_ = response_body
+                self.offset_ = 0
+                self.parameters_ = {
+                    'Inference-Header-Content-Length': header_length,
+                    'Content-Encoding': content_encoding
+                }
+
+            def get(self, key):
+                return self.parameters_.get(key)
+
+            def read(self, length=-1):
+                if length == -1:
+                    return self.response_body_[self.offset_:]
+                else:
+                    prev_offset = self.offset_
+                    self.offset_ += length
+                    return self.response_body_[prev_offset:self.offset_]
+
+        return cls(Response(response_body, header_length, content_encoding),
+                   verbose)
+
+    def as_numpy(self, name):
+        """Get the tensor data for output associated with this object
+        in numpy format
+
+        Parameters
+        ----------
+        name : str
+            The name of the output tensor whose result is to be retrieved.
+
+        Returns
+        -------
+        numpy array
+            The numpy array containing the response data for the tensor or
+            None if the data for specified tensor name is not found.
+        """
+        if self._result.get('outputs') is not None:
+            for output in self._result['outputs']:
+                if output['name'] == name:
+                    datatype = output['datatype']
+                    has_binary_data = False
+                    parameters = output.get("parameters")
+                    if parameters is not None:
+                        this_data_size = parameters.get("binary_data_size")
+                        if this_data_size is not None:
+                            has_binary_data = True
+                            if this_data_size != 0:
+                                start_index = self._output_name_to_buffer_map[
+                                    name]
+                                end_index = start_index + this_data_size
+                                if datatype == 'BYTES':
+                                    # String results contain a 4-byte string length
+                                    # followed by the actual string characters. Hence,
+                                    # need to decode the raw bytes to convert into
+                                    # array elements.
+                                    np_array = deserialize_bytes_tensor(
+                                        self._buffer[start_index:end_index])
+                                else:
+                                    np_array = np.frombuffer(
+                                        self._buffer[start_index:end_index],
+                                        dtype=triton_to_np_dtype(datatype))
+                            else:
+                                np_array = np.empty(0)
+                    if not has_binary_data:
+                        np_array = np.array(output['data'],
+                                            dtype=triton_to_np_dtype(datatype))
+                    np_array = np_array.reshape(output['shape'])
+                    return np_array
+        return None
+
+    def get_output(self, name):
+        """Retrieves the output tensor corresponding to the named output.
+
+        Parameters
+        ----------
+        name : str
+            The name of the tensor for which the output is to be
+            retrieved.
+
+        Returns
+        -------
+        dict
+            If an output tensor with the specified name is present in
+            the infer response then returns it as a json dict,
+            otherwise returns None.
+        """
+        for output in self._result['outputs']:
+            if output['name'] == name:
+                return output
+
+        return None
+
+    def get_response(self):
+        """Retrieves the complete response.
+
+        Returns
+        -------
+        dict
+            The underlying response dict.
+        """
+        return self._result
\ No newline at end of file
diff --git a/examples/clearml_serving_simple_http_inference_request/sample_image.webp b/examples/clearml_serving_simple_http_inference_request/sample_image.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9258c91271eca741cd12b56ed7db4c9a8f2c790d
GIT binary patch
literal 27042
[base85-encoded binary image data omitted]
\ No newline at end of file
diff --git a/examples/clearml_serving_simple_http_inference_request/sample_image.webp b/examples/clearml_serving_simple_http_inference_request/sample_image.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9258c91271eca741cd12b56ed7db4c9a8f2c790d
GIT binary patch
literal 27042
[base85-encoded webp image payload (27042 bytes) omitted]