Add model metric logging

This commit is contained in:
allegroai
2022-03-21 01:00:19 +02:00
parent d684169367
commit 4355c1b1f4
37 changed files with 1733 additions and 253 deletions

View File

@@ -9,11 +9,11 @@ class Preprocess(object):
# set internal state, this will be called only once. (i.e. not per request)
pass
def preprocess(self, body: dict) -> Any:
def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any:
# we expect to get two valid numbers on the dict: x0 and x1
return [[body.get("x0", None), body.get("x1", None)], ]
def postprocess(self, data: Any) -> dict:
def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict:
# post process the data returned from the model inference engine
# data is the return value from model.predict; we will put it inside a return value as Y
return dict(y=data.tolist() if isinstance(data, np.ndarray) else data)

View File

@@ -29,3 +29,4 @@ Or add Canary endpoint
> **_Notice:_** You can also change the serving service while it is already running!
This includes adding/removing endpoints, adding canary model routing etc.
by default new endpoints/models will be automatically updated after 1 minute

View File

@@ -13,7 +13,7 @@ class Preprocess(object):
# set internal state, this will be called only once. (i.e. not per request)
pass
def preprocess(self, body: dict) -> Any:
def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any:
# we expect to get two valid numbers on the dict: x0 and x1
url = body.get("url")
if not url:
@@ -25,7 +25,7 @@ class Preprocess(object):
return np.array(image).flatten()
def postprocess(self, data: Any) -> dict:
def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict:
# post process the data returned from the model inference engine
# data is the return value from model.predict; we will put it inside a return value as Y
if not isinstance(data, np.ndarray):

View File

@@ -12,6 +12,8 @@ The output will be a model created on the project "serving examples", by the nam
## setting up the serving service
Prerequisites, Keras/Tensorflow models require Triton engine support, please use `docker-compose-triton.yml` / `docker-compose-triton-gpu.yml` or if running on Kubernetes, the matching helm chart.
1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID)
2. Create model endpoint:
@@ -36,3 +38,4 @@ Or add Canary endpoint
> **_Notice:_** You can also change the serving service while it is already running!
This includes adding/removing endpoints, adding canary model routing etc.
by default new endpoints/models will be automatically updated after 1 minute

View File

@@ -9,14 +9,14 @@ class Preprocess(object):
# set internal state, this will be called only once. (i.e. not per request)
pass
def preprocess(self, body: dict) -> Any:
def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any:
# we expect to get four valid numbers on the dict: x0, x1, x2, x3
return np.array(
[[body.get("x0", None), body.get("x1", None), body.get("x2", None), body.get("x3", None)], ],
dtype=np.float32
)
def postprocess(self, data: Any) -> dict:
def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict:
# post process the data returned from the model inference engine
# data is the return value from model.predict; we will put it inside a return value as Y
# we pick the most probable class and return the class index (argmax)

View File

@@ -7,14 +7,14 @@ class Preprocess(object):
# set internal state, this will be called only once. (i.e. not per request)
pass
def postprocess(self, data: List[dict]) -> dict:
def postprocess(self, data: List[dict], collect_custom_statistics_fn=None) -> dict:
# we will here average the results and return the new value
# assume data is a list of dicts with more than one element
# average result
return dict(y=0.5 * data[0]['y'][0] + 0.5 * data[1]['y'][0])
def process(self, data: Any) -> Any:
def process(self, data: Any, collect_custom_statistics_fn=None) -> Any:
"""
do something with the actual data, return any type of object.
The returned object will be passed as is to the postprocess function engine

View File

@@ -24,3 +24,4 @@ Training a scikit-learn model (see example/sklearn)
> **_Notice:_** You can also change the serving service while it is already running!
This includes adding/removing endpoints, adding canary model routing etc.
by default new endpoints/models will be automatically updated after 1 minute

View File

@@ -13,7 +13,7 @@ class Preprocess(object):
# set internal state, this will be called only once. (i.e. not per request)
pass
def preprocess(self, body: dict) -> Any:
def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any:
# we expect to get two valid numbers on the dict: x0 and x1
url = body.get("url")
if not url:
@@ -24,7 +24,7 @@ class Preprocess(object):
image = ImageOps.grayscale(image).resize((28, 28))
return np.array(image).flatten()
def postprocess(self, data: Any) -> dict:
def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict:
# post process the data returned from the model inference engine
# data is the return value from model.predict; we will put it inside a return value as Y
if not isinstance(data, np.ndarray):

View File

@@ -13,6 +13,9 @@ The output will be a model created on the project "serving examples", by the nam
## setting up the serving service
Prerequisites, PyTorch models require Triton engine support, please use `docker-compose-triton.yml` / `docker-compose-triton-gpu.yml` or if running on Kubernetes, the matching helm chart.
1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID)
2. Create model endpoint:
@@ -39,4 +42,4 @@ Or add Canary endpoint
> **_Notice:_** You can also change the serving service while it is already running!
This includes adding/removing endpoints, adding canary model routing etc.
by default new endpoints/models will be automatically updated after 1 minute

View File

@@ -9,11 +9,11 @@ class Preprocess(object):
# set internal state, this will be called only once. (i.e. not per request)
pass
def preprocess(self, body: dict) -> Any:
def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any:
# we expect to get two valid numbers on the dict: x0 and x1
return [[body.get("x0", None), body.get("x1", None)], ]
def postprocess(self, data: Any) -> dict:
def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict:
# post process the data returned from the model inference engine
# data is the return value from model.predict; we will put it inside a return value as Y
return dict(y=data.tolist() if isinstance(data, np.ndarray) else data)

View File

@@ -29,3 +29,4 @@ Or add Canary endpoint
> **_Notice:_** You can also change the serving service while it is already running!
This includes adding/removing endpoints, adding canary model routing etc.
by default new endpoints/models will be automatically updated after 1 minute

View File

@@ -10,12 +10,12 @@ class Preprocess(object):
# set internal state, this will be called only once. (i.e. not per request)
pass
def preprocess(self, body: dict) -> Any:
def preprocess(self, body: dict, collect_custom_statistics_fn=None) -> Any:
# we expect to get four valid numbers on the dict: x0, x1, x2, x3
return xgb.DMatrix(
[[body.get("x0", None), body.get("x1", None), body.get("x2", None), body.get("x3", None)]])
def postprocess(self, data: Any) -> dict:
def postprocess(self, data: Any, collect_custom_statistics_fn=None) -> dict:
# post process the data returned from the model inference engine
# data is the return value from model.predict; we will put it inside a return value as Y
return dict(y=data.tolist() if isinstance(data, np.ndarray) else data)