From a49fa0321e8cbdd95976ad4e1a3b1b27daa9757e Mon Sep 17 00:00:00 2001 From: Allegro AI <51604379+allegroai-git@users.noreply.github.com> Date: Sun, 6 Mar 2022 02:12:02 +0200 Subject: [PATCH] Update readme.md --- examples/pytorch/readme.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/pytorch/readme.md b/examples/pytorch/readme.md index 926472c..2ea4a26 100644 --- a/examples/pytorch/readme.md +++ b/examples/pytorch/readme.md @@ -15,16 +15,20 @@ The output will be a model created on the project "serving examples", by the name "train pytorch model" 1. Create serving Service: `clearml-serving create --name "serving example"` (write down the service ID) 2. Create model endpoint: + `clearml-serving --id <service_id> model add --engine triton --endpoint "test_model_pytorch" --preprocess "examples/pytorch/preprocess.py" --name "train pytorch model" --project "serving examples" --input-size 28 28 1 --input-name "INPUT__0" --input-type float32 --output-size -1 10 --output-name "OUTPUT__0" --output-type float32 ` + Or auto update + `clearml-serving --id <service_id> model auto-update --engine triton --endpoint "test_model_pytorch_auto" --preprocess "examples/pytorch/preprocess.py" --name "train pytorch model" --project "serving examples" --max-versions 2 --input-size 28 28 1 --input-name "INPUT__0" --input-type float32 - --output-size -1 10 --output-name "OUTPUT__0" --output-type float32 -` + --output-size -1 10 --output-name "OUTPUT__0" --output-type float32` + Or add Canary endpoint + `clearml-serving --id <service_id> model canary --endpoint "test_model_pytorch_auto" --weights 0.1 0.9 --input-endpoint-prefix test_model_pytorch_auto` 3. Run the Triton Engine `docker run -v ~/clearml.conf:/root/clearml.conf -p 8001:8001 -e CLEARML_SERVING_TASK_ID=<service_id> clearml-serving-triton:latest`