Fix docstrings

This commit is contained in:
allegroai
2021-05-02 09:47:02 +03:00
parent 5e660392b5
commit bd110aed5e
7 changed files with 222 additions and 210 deletions

View File

@@ -64,21 +64,17 @@ class PipelineController(object):
default is ``None``, indicating no time limit.
:param bool auto_connect_task: Store pipeline arguments and configuration in the Task
- ``True`` - The pipeline argument and configuration will be stored in the current Task. All arguments will
be under the hyper-parameter section ``Pipeline``, and the pipeline DAG will be stored as a
Task configuration object named ``Pipeline``.
be under the hyper-parameter section ``Pipeline``, and the pipeline DAG will be stored as a
Task configuration object named ``Pipeline``.
- ``False`` - Do not store with Task.
- ``Task`` - A specific Task object to connect the pipeline with.
:param bool always_create_task: Always create a new Task
- ``True`` - No current Task initialized. Create a new task named ``Pipeline`` in the ``base_task_id``
project.
project.
- ``False`` - Use the :py:meth:`task.Task.current_task` (if exists) to report statistics.
:param bool add_pipeline_tags: (default: False) if True, add `pipe: <pipeline_task_id>` tag to all
steps (Tasks) created by this pipeline.
:param str target_project: If provided, all pipeline steps are cloned into the target project
:param pipeline_name: Optional, provide pipeline name if main Task is not present (default current date)
:param pipeline_project: Optional, provide project storing the pipeline if main Task is not present
"""
@@ -142,23 +138,23 @@ class PipelineController(object):
:param dict parameter_override: Optional parameter overriding dictionary.
The dict values can reference a previously executed step using the following form '${step_name}'
Examples:
Artifact access
parameter_override={'Args/input_file': '${stage1.artifacts.mydata.url}' }
Model access (last model used)
parameter_override={'Args/input_file': '${stage1.models.output.-1.url}' }
Parameter access
parameter_override={'Args/input_file': '${stage3.parameters.Args/input_file}' }
Task ID
parameter_override={'Args/input_file': '${stage3.id}' }
- Artifact access
parameter_override={'Args/input_file': '${stage1.artifacts.mydata.url}' }
- Model access (last model used)
parameter_override={'Args/input_file': '${stage1.models.output.-1.url}' }
- Parameter access
parameter_override={'Args/input_file': '${stage3.parameters.Args/input_file}' }
- Task ID
parameter_override={'Args/input_file': '${stage3.id}' }
:param dict task_overrides: Optional task section overriding dictionary.
The dict values can reference a previously executed step using the following form '${step_name}'
Examples:
clear git repository commit ID
parameter_override={'script.version_num': '' }
git repository commit branch
parameter_override={'script.branch': '${stage1.script.branch}' }
container image
parameter_override={'container.image': '${stage1.container.image}' }
- clear git repository commit ID
parameter_override={'script.version_num': '' }
- git repository commit branch
parameter_override={'script.branch': '${stage1.script.branch}' }
- container image
parameter_override={'container.image': '${stage1.container.image}' }
:param str execution_queue: Optional, the queue to use for executing this specific step.
If not provided, the task will be sent to the default execution queue, as defined on the class
:param float time_limit: Default None, no time limit.
@@ -452,8 +448,10 @@ class PipelineController(object):
Graph itself is a dictionary of Nodes (key based on the Node name),
each node holds links to its parent Nodes (identified by their unique names)
:return: execution tree, as a nested dictionary
Example:
:return: execution tree, as a nested dictionary. Example:
.. code-block:: py
{
'stage1' : Node() {
name: 'stage1'
@@ -461,6 +459,7 @@ class PipelineController(object):
...
},
}
"""
return self._nodes

View File

@@ -134,14 +134,15 @@ class OptimizerBOHB(SearchStrategy, RandomSeed):
Optimization. Instead of sampling new configurations at random,
BOHB uses kernel density estimators to select promising candidates.
For reference: ::
.. note::
For reference:
@InProceedings{falkner-icml-18,
title = {{BOHB}: Robust and Efficient Hyperparameter Optimization at Scale},
author = {Falkner, Stefan and Klein, Aaron and Hutter, Frank},
booktitle = {Proceedings of the 35th International Conference on Machine Learning},
pages = {1436--1445},
year = {2018},
title = {{BOHB}: Robust and Efficient Hyperparameter Optimization at Scale},
author = {Falkner, Stefan and Klein, Aaron and Hutter, Frank},
booktitle = {Proceedings of the 35th International Conference on Machine Learning},
pages = {1436--1445},
year = {2018},
}
:param str base_task_id: Task ID (str)
@@ -210,53 +211,51 @@ class OptimizerBOHB(SearchStrategy, RandomSeed):
"""
Defaults copied from BOHB constructor, see details in BOHB.__init__
BOHB performs robust and efficient hyperparameter optimization
at scale by combining the speed of Hyperband searches with the
guidance and guarantees of convergence of Bayesian
Optimization. Instead of sampling new configurations at random,
BOHB uses kernel density estimators to select promising candidates.
BOHB performs robust and efficient hyperparameter optimization
at scale by combining the speed of Hyperband searches with the
guidance and guarantees of convergence of Bayesian
Optimization. Instead of sampling new configurations at random,
BOHB uses kernel density estimators to select promising candidates.
.. highlight:: none
.. note::
For reference: ::
For reference:
@InProceedings{falkner-icml-18,
title = {{BOHB}: Robust and Efficient Hyperparameter Optimization at Scale},
author = {Falkner, Stefan and Klein, Aaron and Hutter, Frank},
booktitle = {Proceedings of the 35th International Conference on Machine Learning},
pages = {1436--1445},
year = {2018},
}
@InProceedings{falkner-icml-18,
title = {{BOHB}: Robust and Efficient Hyperparameter Optimization at Scale},
author = {Falkner, Stefan and Klein, Aaron and Hutter, Frank},
booktitle = {Proceedings of the 35th International Conference on Machine Learning},
pages = {1436--1445},
year = {2018},
}
:param eta: float (3)
In each iteration, a complete run of sequential halving is executed. In it,
after evaluating each configuration on the same subset size, only a fraction of
1/eta of them 'advances' to the next round.
Must be greater or equal to 2.
:param min_budget: float (0.01)
The smallest budget to consider. Needs to be positive!
:param max_budget: float (1)
The largest budget to consider. Needs to be larger than min_budget!
The budgets will be geometrically distributed
:math:`\sim \eta^k` for :math:`k \in [0, 1, ..., num\_subsets - 1]`.
:param min_points_in_model: int (None)
number of observations to start building a KDE. Default 'None' means
dim+1, the bare minimum.
:param top_n_percent: int (15)
percentage (between 1 and 99, default 15) of the observations that are considered good.
:param num_samples: int (64)
number of samples to optimize EI (default 64)
:param random_fraction: float (1/3.)
fraction of purely random configurations that are sampled from the
prior without the model.
:param bandwidth_factor: float (3.)
to encourage diversity, the points proposed to optimize EI, are sampled
from a 'widened' KDE where the bandwidth is multiplied by this factor (default: 3)
:param min_bandwidth: float (1e-3)
to keep diversity, even when all (good) samples have the same value for one of the parameters,
a minimum bandwidth (Default: 1e-3) is used instead of zero.
Parameters
----------
eta : float (3)
In each iteration, a complete run of sequential halving is executed. In it,
after evaluating each configuration on the same subset size, only a fraction of
1/eta of them 'advances' to the next round.
Must be greater or equal to 2.
min_budget : float (0.01)
The smallest budget to consider. Needs to be positive!
max_budget : float (1)
The largest budget to consider. Needs to be larger than min_budget!
The budgets will be geometrically distributed
:math:`\sim \eta^k` for :math:`k \in [0, 1, ..., num\_subsets - 1]`.
min_points_in_model: int (None)
number of observations to start building a KDE. Default 'None' means
dim+1, the bare minimum.
top_n_percent: int (15)
percentage (between 1 and 99, default 15) of the observations that are considered good.
num_samples: int (64)
number of samples to optimize EI (default 64)
random_fraction: float (1/3.)
fraction of purely random configurations that are sampled from the
prior without the model.
bandwidth_factor: float (3.)
to encourage diversity, the points proposed to optimize EI, are sampled
from a 'widened' KDE where the bandwidth is multiplied by this factor (default: 3)
min_bandwidth: float (1e-3)
to keep diversity, even when all (good) samples have the same value for one of the parameters,
a minimum bandwidth (Default: 1e-3) is used instead of zero.
"""
if min_budget:
self._bohb_kwargs['min_budget'] = min_budget

View File

@@ -38,9 +38,9 @@ class ClearmlJob(object):
:param str parent: Set newly created Task parent task field, default: base_task_id.
:param dict kwargs: additional Task creation parameters
:param bool disable_clone_task: if False (default) clone base task id.
If True, use the base_task_id directly (base-task must be in draft-mode / created),
If True, use the base_task_id directly (base-task must be in draft-mode / created),
:param bool allow_caching: If True check if we have a previously executed Task with the same specification
If we do, use it and set internal is_cached flag. Default False (always create new Task).
If we do, use it and set internal is_cached flag. Default False (always create new Task).
"""
base_temp_task = Task.get_task(task_id=base_task_id)
if disable_clone_task:
@@ -147,6 +147,7 @@ class ClearmlJob(object):
Send Job for execution on the requested execution queue
:param str queue_name:
:return: False if Task is not in "created" status (i.e. cannot be enqueued)
"""
if self._is_cached_task:

View File

@@ -380,8 +380,8 @@ class Dataset(object):
def finalize(self, verbose=False, raise_on_error=True):
# type: (bool, bool) -> bool
"""
Finalize the dataset (if upload was not called, it will be called automatically) publish dataset Task.
If files need to be uploaded, throw exception (or return False)
Finalize the dataset and publish the dataset Task. ``upload`` must first be called to verify there are no pending uploads.
If files still need to be uploaded, it throws an exception (or returns False)
:param verbose: If True print verbose progress report
:param raise_on_error: If True raise exception if dataset finalizing failed
@@ -491,10 +491,11 @@ class Dataset(object):
If dataset_id is given, return a list of files that remained unchanged since the specified dataset_version
:param dataset_path: Only match files matching the dataset_path (including wildcards).
Example: folder/sub/*.json
Example: 'folder/sub/*.json'
:param recursive: If True (default) matching dataset_path recursively
:param dataset_id: Filter list based on the dataset id containing the latest version of the file.
Default: None, do not filter files based on parent dataset.
:return: List of files with relative path
(files might not be available locally until get_local_copy() is called)
"""
@@ -571,13 +572,17 @@ class Dataset(object):
def get_dependency_graph(self):
"""
return the DAG of the dataset dependencies (all previous dataset version and their parents/
return the DAG of the dataset dependencies (all previous dataset version and their parents)
Example:
{
'current_dataset_id': ['parent_1_id', 'parent_2_id'],
'parent_2_id': ['parent_1_id'],
'parent_1_id': [],
}
.. code-block:: py
{
'current_dataset_id': ['parent_1_id', 'parent_2_id'],
'parent_2_id': ['parent_1_id'],
'parent_1_id': [],
}
:return: dict representing the genealogy dag graph of the current dataset
"""
@@ -644,7 +649,7 @@ class Dataset(object):
:param dataset_name: Naming the new dataset
:param dataset_project: Project containing the dataset.
If not specified, infer project name from parent datasets
        If not specified, infer project name from parent datasets
:param parent_datasets: Expand a parent dataset by adding/removing files
:param use_current_task: False (default), a new Dataset task is created.
If True, the dataset is created on the current Task.

View File

@@ -459,11 +459,11 @@ class Model(BaseModel):
:param project_name: Optional, filter based project name string, if not given query models from all projects
:param model_name: Optional Model name as shown in the model artifactory
:param tags: Optional filter models based on list of tags, example: ['production', 'verified', '-qa']
Notice use '-' prefix to filter out tags.
Notice use '-' prefix to filter out tags.
:param only_published: If True only return published models.
:param include_archived: If True return archived models.
:param max_results: Optional return the last X models,
sorted by last update time (from the most recent to the least).
sorted by last update time (from the most recent to the least).
:return: List of Model objects
"""
@@ -825,7 +825,7 @@ class InputModel(Model):
:param object task: A Task object.
:param str name: The model name to be stored on the Task
(default the filename, of the model weights, without the file extension)
(default the filename, of the model weights, without the file extension)
"""
self._set_task(task)

View File

@@ -194,18 +194,22 @@ class StorageManager(object):
# type: (str, str, Optional[str]) -> None
"""
Upload local folder recursively to a remote storage, maintaining the sub folder structure
in the remote storage. For Example:
If we have a local file: ~/folder/sub/file.ext
StorageManager.upload_folder('~/folder/', 's3://bucket/')
will create: s3://bucket/sub/file.ext
:param local_folder: Local folder to recursively upload
:param remote_url: Target remote storage location, tree structure of `local_folder` will
be created under the target remote_url. Supports Http/S3/GS/Azure and shared filesystem.
Example: 's3://bucket/data/'
:param match_wildcard: If specified only upload files matching the `match_wildcard`
Example: `*.json`
(Notice: target file size/date are not checked). Default True, always upload
Notice if uploading to http, we will always overwrite the target.
in the remote storage.
.. note::
If we have a local file `~/folder/sub/file.ext` then
`StorageManager.upload_folder('~/folder/', 's3://bucket/')`
will create `s3://bucket/sub/file.ext`
:param str local_folder: Local folder to recursively upload
:param str remote_url: Target remote storage location, tree structure of `local_folder` will
be created under the target remote_url. Supports Http/S3/GS/Azure and shared filesystem.
Example: 's3://bucket/data/'
:param str match_wildcard: If specified only upload files matching the `match_wildcard`
Example: `*.json`
Notice: target file size/date are not checked. Default True, always upload.
Notice if uploading to http, we will always overwrite the target.
"""
base_logger = LoggerRoot.get_base_logger()
@@ -235,19 +239,24 @@ class StorageManager(object):
# type: (str, Optional[str], Optional[str], bool) -> Optional[str]
"""
Download remote folder recursively to the local machine, maintaining the sub folder structure
from the remote storage. For Example:
If we have a local file: s3://bucket/sub/file.ext
StorageManager.download_folder('s3://bucket/', '~/folder/')
will create: ~/folder/sub/file.ext
:param remote_url: Source remote storage location, tree structure of `remote_url` will
be created under the target local_folder. Supports S3/GS/Azure and shared filesystem.
Example: 's3://bucket/data/'
:param local_folder: Local target folder to create the full tree from remote_url.
If None, use the cache folder. (Default: use cache folder)
from the remote storage.
.. note::
If we have a local file `s3://bucket/sub/file.ext` then
`StorageManager.download_folder('s3://bucket/', '~/folder/')`
will create `~/folder/sub/file.ext`
:param str remote_url: Source remote storage location, tree structure of `remote_url` will
be created under the target local_folder. Supports S3/GS/Azure and shared filesystem.
Example: 's3://bucket/data/'
:param str local_folder: Local target folder to create the full tree from remote_url.
If None, use the cache folder. (Default: use cache folder)
:param match_wildcard: If specified only download files matching the `match_wildcard`
Example: `*.json`
:param overwrite: If False, and target files exist do not download.
If True always download the remote files. Default False.
Example: `*.json`
:param bool overwrite: If False, and target files exist do not download.
If True always download the remote files. Default False.
:return: Target local folder
"""

View File

@@ -92,37 +92,37 @@ class Task(_Task):
advanced experimentation functions, such as autoML.
.. warning::
Do not construct Task objects directly. Use one of the methods listed below to create experiments or
reference existing experiments.
Do not construct Task objects directly. Use one of the methods listed below to create experiments or
reference existing experiments.
For detailed information about creating Task objects, see the following methods:
- Create a new reproducible Task - :meth:`Task.init`
.. important::
.. important::
In some cases, ``Task.init`` may return a Task object which is already stored in **ClearML Server** (already
initialized), instead of creating a new Task. For a detailed explanation of those cases, see the ``Task.init``
method.
- Create a new non-reproducible Task - :meth:`Task.create`
- Manually create a new Task (no auto-logging will apply) - :meth:`Task.create`
- Get the current running Task - :meth:`Task.current_task`
- Get another (different) Task - :meth:`Task.get_task`
.. note::
The **ClearML** documentation often refers to a Task as, "Task (experiment)".
The **ClearML** documentation often refers to a Task as, "Task (experiment)".
"Task" refers to the class in the ClearML Python Client Package, the object in your Python experiment script,
and the entity with which **ClearML Server** and **ClearML Agent** work.
"Task" refers to the class in the ClearML Python Client Package, the object in your Python experiment script,
and the entity with which **ClearML Server** and **ClearML Agent** work.
"Experiment" refers to your deep learning solution, including its connected components, inputs, and outputs,
and is the experiment you can view, analyze, compare, modify, duplicate, and manage using the ClearML
**Web-App** (UI).
"Experiment" refers to your deep learning solution, including its connected components, inputs, and outputs,
and is the experiment you can view, analyze, compare, modify, duplicate, and manage using the ClearML
**Web-App** (UI).
Therefore, a "Task" is effectively an "experiment", and "Task (experiment)" encompasses its usage throughout
the ClearML.
Therefore, a "Task" is effectively an "experiment", and "Task (experiment)" encompasses its usage throughout
the ClearML.
The exception to this Task behavior is sub-tasks (non-reproducible Tasks), which do not use the main execution
Task. Creating a sub-task always creates a new Task with a new Task ID.
The exception to this Task behavior is sub-tasks (non-reproducible Tasks), which do not use the main execution
Task. Creating a sub-task always creates a new Task with a new Task ID.
"""
TaskTypes = _Task.TaskTypes
@@ -222,27 +222,26 @@ class Task(_Task):
call ``Task.get_task``. See the ``Task.get_task`` method for an example.
For example:
The first time the following code runs, it will create a new Task. The status will be Completed.
The first time the following code runs, it will create a new Task. The status will be Completed.
.. code-block:: py
.. code-block:: py
from clearml import Task
task = Task.init('myProject', 'myTask')
from clearml import Task
task = Task.init('myProject', 'myTask')
If this code runs again, it will not create a new Task. It does not store a model or artifact,
it is not Published (its status Completed) , it was not Archived, and a new Task is not forced.
If this code runs again, it will not create a new Task. It does not store a model or artifact,
    it is not Published (its status is Completed), it was not Archived, and a new Task is not forced.
If the Task is Published or Archived, and run again, it will create a new Task with a new Task ID.
If the Task is Published or Archived, and run again, it will create a new Task with a new Task ID.
The following code will create a new Task every time it runs, because it stores an artifact.
The following code will create a new Task every time it runs, because it stores an artifact.
.. code-block:: py
.. code-block:: py
task = Task.init('myProject', 'myOtherTask')
task = Task.init('myProject', 'myOtherTask')
d = {'a': '1'}
task.upload_artifact('myArtifact', d)
d = {'a': '1'}
task.upload_artifact('myArtifact', d)
:param str project_name: The name of the project in which the experiment will be created. If the project does
not exist, it is created. If ``project_name`` is ``None``, the repository name is used. (Optional)
@@ -287,7 +286,7 @@ class Task(_Task):
all previous artifacts / models/ logs are intact.
New logs will continue iteration/step based on the previous-execution maximum iteration value.
For example:
The last train/loss scalar reported was iteration 100, the next report will be iteration 101.
The last train/loss scalar reported was iteration 100, the next report will be iteration 101.
The values are:
@@ -301,8 +300,7 @@ class Task(_Task):
If True is passed, the default files_server will be used for model storage.
In the default location, ClearML creates a subfolder for the output.
The subfolder structure is the following:
<output destination name> / <project name> / <task name>.< Task ID>
<output destination name> / <project name> / <task name>.<Task ID>
The following are examples of ``output_uri`` values for the supported locations:
@@ -313,6 +311,7 @@ class Task(_Task):
- Default file server: True
.. important::
For cloud storage, you must install the **ClearML** package for your cloud storage type,
and then configure your storage credentials. For detailed information, see
`ClearML Python Client Extras <./references/clearml_extras_storage/>`_ in the "ClearML Python Client
@@ -323,11 +322,11 @@ class Task(_Task):
The values are:
- ``True`` - Automatically connect. (default)
- ``False`` - Do not automatically connect.
- ``False`` - Do not automatically connect.
- A dictionary - In addition to a boolean, you can use a dictionary for fined grained control of connected
arguments. The dictionary keys are argparse variable names and the values are booleans.
The ``False`` value excludes the specified argument from the Task's parameter section.
Keys missing from the dictionary default to ``True``, and an empty dictionary defaults to ``False``.
arguments. The dictionary keys are argparse variable names and the values are booleans.
The ``False`` value excludes the specified argument from the Task's parameter section.
Keys missing from the dictionary default to ``True``, and an empty dictionary defaults to ``False``.
For example:
@@ -345,10 +344,10 @@ class Task(_Task):
The values are:
- ``True`` - Automatically connect (default)
- ``False`` - Do not automatically connect
- ``False`` - Do not automatically connect
- A dictionary - In addition to a boolean, you can use a dictionary for fined grained control of connected
frameworks. The dictionary keys are frameworks and the values are booleans.
Keys missing from the dictionary default to ``True``, and an empty dictionary defaults to ``False``.
frameworks. The dictionary keys are frameworks and the values are booleans.
Keys missing from the dictionary default to ``True``, and an empty dictionary defaults to ``False``.
For example:
@@ -374,10 +373,10 @@ class Task(_Task):
- ``True`` - Automatically connect (default)
- ``False`` - Do not automatically connect
- A dictionary - In addition to a boolean, you can use a dictionary for fined grained control of stdout and
stderr. The dictionary keys are 'stdout' , 'stderr' and 'logging', the values are booleans.
Keys missing from the dictionary default to ``False``, and an empty dictionary defaults to ``False``.
Notice, the default behaviour is logging stdout/stderr the
`logging` module is logged as a by product of the stderr logging
stderr. The dictionary keys are 'stdout' , 'stderr' and 'logging', the values are booleans.
Keys missing from the dictionary default to ``False``, and an empty dictionary defaults to ``False``.
Notice, the default behaviour is logging stdout/stderr the
`logging` module is logged as a by product of the stderr logging
For example:
@@ -710,42 +709,42 @@ class Task(_Task):
For example:
The following code demonstrates calling ``Task.get_task`` to report a scalar to another Task. The output
of :meth:`.Logger.report_scalar` from testing is associated with the Task named ``training``. It allows
training and testing to run concurrently, because they initialized different Tasks (see :meth:`Task.init`
for information about initializing Tasks).
The following code demonstrates calling ``Task.get_task`` to report a scalar to another Task. The output
of :meth:`.Logger.report_scalar` from testing is associated with the Task named ``training``. It allows
training and testing to run concurrently, because they initialized different Tasks (see :meth:`Task.init`
for information about initializing Tasks).
The training script:
The training script:
.. code-block:: py
.. code-block:: py
# initialize the training Task
task = Task.init('myProject', 'training')
# initialize the training Task
task = Task.init('myProject', 'training')
# do some training
# do some training
The testing script:
The testing script:
.. code-block:: py
.. code-block:: py
# initialize the testing Task
task = Task.init('myProject', 'testing')
# initialize the testing Task
task = Task.init('myProject', 'testing')
# get the training Task
train_task = Task.get_task(project_name='myProject', task_name='training')
# get the training Task
train_task = Task.get_task(project_name='myProject', task_name='training')
# report metrics in the training Task
for x in range(10):
train_task.get_logger().report_scalar('title', 'series', value=x * 2, iteration=x)
# report metrics in the training Task
for x in range(10):
train_task.get_logger().report_scalar('title', 'series', value=x * 2, iteration=x)
:param str task_id: The Id (system UUID) of the experiment to get.
If specified, ``project_name`` and ``task_name`` are ignored.
If specified, ``project_name`` and ``task_name`` are ignored.
:param str project_name: The project name of the Task to get.
:param str task_name: The name of the Task within ``project_name`` to get.
:param bool allow_archived: Only applicable if *not* using specific ``task_id``,
If True (default) allow to return archived Tasks, if False filter out archived Tasks
If True (default) allow to return archived Tasks, if False filter out archived Tasks
:param bool task_filter: Only applicable if *not* using specific ``task_id``,
Pass additional query filters, on top of project/name. See details in Task.get_tasks.
Pass additional query filters, on top of project/name. See details in Task.get_tasks.
:return: The Task specified by ID, or project name / experiment name combination.
"""
@@ -1076,8 +1075,8 @@ class Task(_Task):
:param str name: A section name associated with the connected object. Default: 'General'
Currently only supported for `dict` / `TaskParameter` objects
Examples:
name='General' will put the connected dictionary under the General section in the hyper-parameters
name='Train' will put the connected dictionary under the Train section in the hyper-parameters
name='General' will put the connected dictionary under the General section in the hyper-parameters
name='Train' will put the connected dictionary under the Train section in the hyper-parameters
:return: The result returned when connecting the object, if supported.
@@ -1753,11 +1752,8 @@ class Task(_Task):
# type: (...) -> bool
"""
Set user properties for this task.
A user property ca contain the following fields (all of type string):
* name
* value
* description
* type
A user property can contain the following fields (all of type string):
name / value / description / type
Examples:
task.set_user_properties(backbone='great', stable=True)
@@ -1769,49 +1765,49 @@ class Task(_Task):
:param iterables: Properties iterables, each can be:
* A dictionary of string key (name) to either a string value (value) a dict (property details). If the value
is a dict, it must contain a "value" field. For example:
is a dict, it must contain a "value" field. For example:
.. code-block:: py
.. code-block:: javascript
{
"property_name": {"description": "This is a user property", "value": "property value"},
"another_property_name": {"description": "This is another user property", "value": "another value"},
"yet_another_property_name": "some value"
}
{
"property_name": {"description": "This is a user property", "value": "property value"},
"another_property_name": {"description": "This is another user property", "value": "another value"},
"yet_another_property_name": "some value"
}
* An iterable of dicts (each representing property details). Each dict must contain a "name" field and a
"value" field. For example:
"value" field. For example:
.. code-block:: py
.. code-block:: javascript
[
{
"name": "property_name",
"description": "This is a user property",
"value": "property value"
},
{
"name": "another_property_name",
"description": "This is another user property",
"value": "another value"
}
]
[
{
"name": "property_name",
"description": "This is a user property",
"value": "property value"
},
{
"name": "another_property_name",
"description": "This is another user property",
"value": "another value"
}
]
:param properties: Additional properties keyword arguments. Key is the property name, and value can be
a string (property value) or a dict (property details). If the value is a dict, it must contain a "value"
field. For example:
.. code-block:: py
.. code-block:: javascript
{
"property_name": "string as property value",
"another_property_name":
{
{
"property_name": "string as property value",
"another_property_name": {
"type": "string",
"description": "This is user property",
"value": "another value"
}
}
}
"""
if not Session.check_min_api_version("2.9"):
self.log.info("User properties are not supported by the server")
@@ -1829,7 +1825,8 @@ class Task(_Task):
"""
Delete hyper-parameters for this task.
:param iterables: Hyper parameter key iterables. Each an iterable whose possible values each represent
a hyper-parameter entry to delete, value formats are:
a hyper-parameter entry to delete, value formats are:
* A dictionary containing a 'section' and 'name' fields
* An iterable (e.g. tuple, list etc.) whose first two items denote 'section' and 'name'
"""
@@ -2245,8 +2242,10 @@ class Task(_Task):
.. code-block:: py
Task.set_credentials(api_host='http://localhost:8008', web_host='http://localhost:8080',
files_host='http://localhost:8081', key='optional_credentials', secret='optional_credentials')
Task.set_credentials(
api_host='http://localhost:8008', web_host='http://localhost:8080', files_host='http://localhost:8081',
key='optional_credentials', secret='optional_credentials'
)
task = Task.init('project name', 'experiment name')
:param str api_host: The API server url. For example, ``host='http://localhost:8008'``