From 16ffa620b6474f1d9e8284b2130922829ae5e24f Mon Sep 17 00:00:00 2001 From: pollfly <75068813+pollfly@users.noreply.github.com> Date: Mon, 27 Dec 2021 10:41:43 +0200 Subject: [PATCH] Small edits (#144) --- docs/clearml_agent.md | 2 +- docs/deploying_clearml/clearml_server_aws_ec2_ami.md | 2 +- docs/fundamentals/pipelines.md | 4 ++-- docs/getting_started/mlops/mlops_best_practices.md | 10 +++++----- .../frameworks/fastai/fastai_with_tensorboard.md | 2 +- docs/guides/frameworks/megengine/megengine_mnist.md | 2 +- docs/guides/reporting/html_reporting.md | 6 ++++-- docs/guides/reporting/image_reporting.md | 8 ++++---- docs/guides/reporting/manual_matplotlib_reporting.md | 4 ++-- docs/hyperdatasets/overview.md | 4 ++-- sidebars.js | 12 ++++++------ 11 files changed, 29 insertions(+), 27 deletions(-) diff --git a/docs/clearml_agent.md b/docs/clearml_agent.md index 2d85e743..16d35747 100644 --- a/docs/clearml_agent.md +++ b/docs/clearml_agent.md @@ -41,7 +41,7 @@ and [configuration options](configs/clearml_conf.md#agent-section). ## Installation :::note -If **ClearML** was previously configured, follow [this](clearml_agent#adding-clearml-agent-to-a-configuration-file) to add +If **ClearML** was previously configured, follow [this](#adding-clearml-agent-to-a-configuration-file) to add ClearML Agent specific configurations ::: diff --git a/docs/deploying_clearml/clearml_server_aws_ec2_ami.md b/docs/deploying_clearml/clearml_server_aws_ec2_ami.md index 69412bdf..8499c748 100644 --- a/docs/deploying_clearml/clearml_server_aws_ec2_ami.md +++ b/docs/deploying_clearml/clearml_server_aws_ec2_ami.md @@ -50,7 +50,7 @@ The minimum recommended amount of RAM is 8 GB. For example, a t3.large or t3a.la 1. Open the AWS Marketplace for the [Allegro AI ClearML Server](https://aws.amazon.com/marketplace/pp/B085D8W5NM). 1. In the heading area, click **Continue to Subscribe**. -1. **On the Subscribe to software** page, click **Accept Terms**, and then click **Continue to Configuration**. +1. 
On the **Subscribe to software** page, click **Accept Terms**, and then click **Continue to Configuration**. 1. On the **Configure this software** page, complete the following: 1. In the **Fulfillment Option** list, select **64-bit (x86) Amazon Machine Image (AMI)**. diff --git a/docs/fundamentals/pipelines.md b/docs/fundamentals/pipelines.md index 3df23be5..3c57ec90 100644 --- a/docs/fundamentals/pipelines.md +++ b/docs/fundamentals/pipelines.md @@ -155,7 +155,7 @@ def main(pickle_url, mock_parameter='mock'): X_train, X_test, y_train, y_test = step_two(data_frame) model = step_three(X_train, y_train) accuracy = 100 * step_four(model, X_data=X_test, Y_data=y_test) - print(f‘Accuracy={accuracy}%’) + print(f"Accuracy={accuracy}%") ``` Notice that the driver is the `main` function, calling ("launching") the different steps. Next we add the decorators over @@ -222,7 +222,7 @@ def main(pickle_url, mock_parameter='mock'): X_train, X_test, y_train, y_test = step_two(data_frame) model = step_three(X_train, y_train) accuracy = 100 * step_four(model, X_data=X_test, Y_data=y_test) - print(f‘Accuracy={accuracy}%’) + print(f"Accuracy={accuracy}%") ``` We wrap each pipeline component with `@PipelineDecorator.component`, and the main pipeline logic with diff --git a/docs/getting_started/mlops/mlops_best_practices.md b/docs/getting_started/mlops/mlops_best_practices.md index badd92d7..8975179a 100644 --- a/docs/getting_started/mlops/mlops_best_practices.md +++ b/docs/getting_started/mlops/mlops_best_practices.md @@ -18,23 +18,23 @@ If you are afraid of clutter, use the archive option, and set up your own [clean ## Clone Tasks In order to define a Task in ClearML we have two options -- Run the actual code with `task.init` call. This will create and auto-populate the Task in CleaML (including Git Repo/Python Packages/ Command line etc.). -- Register local/remote code repository with `clearml-task`. See [details](../../apps/clearml_task.md). 
+- Run the actual code with `task.init` call. This will create and auto-populate the Task in ClearML (including Git Repo / Python Packages / Command line etc.). +- Register local / remote code repository with `clearml-task`. See [details](../../apps/clearml_task.md). Once we have a Task in ClearML, we can clone and edit its definitions in the UI, then launch it on one of our nodes with [ClearML Agent](../../clearml_agent.md). ## Advanced Automation -- Create daily/weekly cron jobs for retraining best performing models on. +- Create daily / weekly cron jobs for retraining best performing models on. - Create data monitoring & scheduling and launch inference jobs to test performance on any new coming dataset. - Once there are two or more experiments that run after another, group them together into a [pipeline](../../fundamentals/pipelines.md). ## Manage Your Data Use [ClearML Data](../../clearml_data/clearml_data.md) to version your data, then link it to running experiments for easy reproduction. -Make datasets machine agnostic (i.e. store original dataset in a shared storage location, e.g. shared-folder/S3/Gs/Azure). +Make datasets machine agnostic (i.e. store original dataset in a shared storage location, e.g. shared-folder / S3 / Gs / Azure). ClearML Data supports efficient Dataset storage and caching, differentiable & compressed. ## Scale Your Work -Use [ClearML Agent](../../clearml_agent.md) to scale work. Install the agent machines (Remote or local) and manage +Use [ClearML Agent](../../clearml_agent.md) to scale work. Install the agent machines (remote or local) and manage training workload with it. Improve team collaboration by transparent resource monitoring, always know what is running where. 
diff --git a/docs/guides/frameworks/fastai/fastai_with_tensorboard.md b/docs/guides/frameworks/fastai/fastai_with_tensorboard.md index 8bda0b0a..e04c165b 100644 --- a/docs/guides/frameworks/fastai/fastai_with_tensorboard.md +++ b/docs/guides/frameworks/fastai/fastai_with_tensorboard.md @@ -1,5 +1,5 @@ --- -title: Fastai +title: FastAI --- The [fastai_with_tensorboard.py](https://github.com/allegroai/clearml/blob/master/examples/frameworks/fastai/fastai_with_tensorboard.py) example demonstrates the integration of **ClearML** into code that uses fastai and TensorBoard. diff --git a/docs/guides/frameworks/megengine/megengine_mnist.md b/docs/guides/frameworks/megengine/megengine_mnist.md index 7897d9e3..1dc52580 100644 --- a/docs/guides/frameworks/megengine/megengine_mnist.md +++ b/docs/guides/frameworks/megengine/megengine_mnist.md @@ -1,5 +1,5 @@ --- -title: MegEngine MNIST +title: MegEngine --- The [megengine_mnist.py](https://github.com/allegroai/clearml/blob/master/examples/frameworks/megengine/megengine_mnist.py) diff --git a/docs/guides/reporting/html_reporting.md b/docs/guides/reporting/html_reporting.md index dda848b3..1e73a8d3 100644 --- a/docs/guides/reporting/html_reporting.md +++ b/docs/guides/reporting/html_reporting.md @@ -15,12 +15,14 @@ When the script runs, it creates an experiment named `html samples reporting`, w ## Reporting HTML URLs -Report HTML by URL, using the `Logger.report_media` method `url` parameter. +Report HTML by URL, using the [Logger.report_media](../../references/sdk/logger.md#report_media) method's `url` parameter. See the example script's [report_html_url](https://github.com/allegroai/clearml/blob/master/examples/reporting/html_reporting.py#L16) function, which reports the **ClearML** documentation's home page. 
- Logger.current_logger().report_media("html", "url_html", iteration=iteration, url="https://allegro.ai/docs/index.html") +```python +Logger.current_logger().report_media("html", "url_html", iteration=iteration, url="https://clear.ml/docs") +``` ## Reporting HTML Local Files diff --git a/docs/guides/reporting/image_reporting.md b/docs/guides/reporting/image_reporting.md index 67ce04e9..61b45e22 100644 --- a/docs/guides/reporting/image_reporting.md +++ b/docs/guides/reporting/image_reporting.md @@ -10,10 +10,10 @@ demonstrates reporting (uploading) images in several formats, including: * PIL Image objects * Local files. -**ClearML** uploads images to the bucket specified in the **ClearML** configuration file -or **ClearML** can be configured for image storage, see [Logger.set_default_upload_destination](../../references/sdk/logger.md#set_default_upload_destination) +ClearML uploads images to the bucket specified in the ClearML [configuration file](../../configs/clearml_conf.md), +or ClearML can be configured for image storage, see [Logger.set_default_upload_destination](../../references/sdk/logger.md#set_default_upload_destination) (storage for [artifacts](../../fundamentals/artifacts.md#setting-upload-destination) is different). Set credentials for -storage in the **ClearML** configuration file. +storage in the ClearML configuration file. When the script runs, it creates an experiment named `image reporting`, which is associated with the `examples` project. @@ -48,7 +48,7 @@ Logger.current_logger().report_image( ) ``` -**ClearML** reports these images as debug samples in the **ClearML Web UI** **>** experiment details **>** **RESULTS** tab +ClearML reports these images as debug samples in the **ClearML Web UI** **>** experiment details **>** **RESULTS** tab **>** **DEBUG SAMPLES** sub-tab. 
![image](../../img/examples_reporting_07.png) diff --git a/docs/guides/reporting/manual_matplotlib_reporting.md b/docs/guides/reporting/manual_matplotlib_reporting.md index a69970bf..bc902ca4 100644 --- a/docs/guides/reporting/manual_matplotlib_reporting.md +++ b/docs/guides/reporting/manual_matplotlib_reporting.md @@ -5,8 +5,8 @@ title: Manual Matplotlib Reporting The [matplotlib_manual_reporting.py](https://github.com/allegroai/clearml/blob/master/examples/reporting/matplotlib_manual_reporting.py) example demonstrates reporting using Matplotlib and Seaborn with **ClearML**. -When the script runs, it creates an experiment named "Manual Matplotlib example", which is associated with the -examples project. +When the script runs, it creates an experiment named `Manual Matplotlib example`, which is associated with the +`examples` project. The Matplotlib figure reported by calling the [Logger.report_matplotlib_figure](../../references/sdk/logger.md#report_matplotlib_figure) method appears in **RESULTS** **>** **PLOTS**. diff --git a/docs/hyperdatasets/overview.md b/docs/hyperdatasets/overview.md index cfe5e38a..308e3afc 100644 --- a/docs/hyperdatasets/overview.md +++ b/docs/hyperdatasets/overview.md @@ -7,7 +7,7 @@ through parametrized data access and meta-data version control. The basic premise is that a user-formed query is a full representation of the dataset used by the ML/DL process. -ClearML Enterprise's hyperdatasets supports rapid prototyping, creating new opportunities such as: +ClearML Enterprise's Hyper-Datasets supports rapid prototyping, creating new opportunities such as: * Hyperparameter optimization of the data itself * QA/QC pipelining * CD/CT (continuous training) during deployment @@ -28,7 +28,7 @@ These components interact in a way that enables revising data and tracking and a Frames are the basics units of data in ClearML Enterprise. SingleFrames and FrameGroups make up a Dataset version. Dataset versions can be created, modified, and removed. 
The different version are recorded and available, -so experiments and their data are reproducible and traceable. +so experiments, and their data are reproducible and traceable. Lastly, Dataviews manage views of the dataset with queries, so the input data to an experiment can be defined from a subset of a Dataset or combinations of Datasets. \ No newline at end of file diff --git a/sidebars.js b/sidebars.js index b5076543..39387ae1 100644 --- a/sidebars.js +++ b/sidebars.js @@ -67,11 +67,11 @@ module.exports = { {'Docker': ['guides/docker/extra_docker_shell_script']}, {'Frameworks': [ {'Autokeras': ['guides/frameworks/autokeras/integration_autokeras', 'guides/frameworks/autokeras/autokeras_imdb_example']}, - {'FastAI': ['guides/frameworks/fastai/fastai_with_tensorboard']}, + 'guides/frameworks/fastai/fastai_with_tensorboard', {'Keras': ['guides/frameworks/keras/jupyter', 'guides/frameworks/keras/keras_tensorboard']}, - {'LightGBM': ['guides/frameworks/lightgbm/lightgbm_example']}, - {'Matplotlib': ['guides/frameworks/matplotlib/matplotlib_example']}, - {'MegEngine':['guides/frameworks/megengine/megengine_mnist']}, + 'guides/frameworks/lightgbm/lightgbm_example', + 'guides/frameworks/matplotlib/matplotlib_example', + 'guides/frameworks/megengine/megengine_mnist', {'PyTorch': ['guides/frameworks/pytorch/pytorch_distributed_example', 'guides/frameworks/pytorch/pytorch_matplotlib', 'guides/frameworks/pytorch/pytorch_mnist', 'guides/frameworks/pytorch/pytorch_tensorboard', 'guides/frameworks/pytorch/pytorch_tensorboardx', @@ -85,14 +85,14 @@ module.exports = { ] }, {'PyTorch Ignite': ['guides/frameworks/pytorch ignite/integration_pytorch_ignite', 'guides/frameworks/pytorch ignite/pytorch_ignite_mnist']}, - {'PyTorch Lightning': ['guides/frameworks/pytorch_lightning/pytorch_lightning_example']}, + 'guides/frameworks/pytorch_lightning/pytorch_lightning_example', {'Scikit-Learn': ['guides/frameworks/scikit-learn/sklearn_joblib_example', 
'guides/frameworks/scikit-learn/sklearn_matplotlib_example']}, {'TensorBoardX': ['guides/frameworks/tensorboardx/tensorboardx', "guides/frameworks/tensorboardx/video_tensorboardx"]}, { 'Tensorflow': ['guides/frameworks/tensorflow/tensorboard_pr_curve', 'guides/frameworks/tensorflow/tensorboard_toy', 'guides/frameworks/tensorflow/tensorflow_mnist', 'guides/frameworks/tensorflow/integration_keras_tuner'] }, - {'XGboost': ['guides/frameworks/xgboost/xgboost_sample']} + 'guides/frameworks/xgboost/xgboost_sample' ]}, {'IDEs': ['guides/ide/remote_jupyter_tutorial', 'guides/ide/integration_pycharm', 'guides/ide/google_colab']}, {'Offline Mode':['guides/set_offline']},